hamtaa-texttools 1.1.17-py3-none-any.whl → 1.1.19-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,21 +4,20 @@ from collections.abc import Callable
 
 from openai import AsyncOpenAI
 
-from texttools.tools.internals.async_operator import AsyncOperator
-import texttools.tools.internals.models as Models
+from texttools.internals.async_operator import AsyncOperator
+import texttools.internals.models as Models
+from texttools.internals.exceptions import (
+    TextToolsError,
+    PromptError,
+    LLMError,
+    ValidationError,
+)
 
 
 class AsyncTheTool:
     """
-    Async counterpart to TheTool.
-
-    Each method configures the async operator with a specific YAML prompt,
+    Each method configures the operator with a specific YAML prompt,
     output schema, and flags, then delegates execution to `operator.run()`.
-
-    Usage:
-        async_client = AsyncOpenAI(...)
-        tool = TheToolAsync(async_client, model="model-name")
-        result = await tool.categorize("text ...", with_analysis=True)
    """
 
     def __init__(
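In 1.1.19 the internals move from `texttools.tools.internals` to `texttools.internals`, and the module now imports a dedicated exception hierarchy. A minimal caller-side sketch, assuming these classes are importable exactly as shown above and that `TextToolsError` is the common base (the `except` ordering later in this diff suggests, but does not prove, this):

```python
# Sketch only: import path copied from the diff; public-API status is an assumption.
from texttools.internals.exceptions import TextToolsError

try:
    ...  # any texttools internals call that may raise
except TextToolsError as exc:
    print(f"texttools failed: {exc}")
```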
@@ -45,6 +44,8 @@ class AsyncTheTool:
         """
         Categorize a text into a category / category tree.
 
+        Important Note: category_tree mode is EXPERIMENTAL; you can use it, but it isn't reliable.
+
         Arguments:
             text: The input text to categorize
             categories: The category / category_tree to give to LLM
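With category_tree mode flagged experimental, the flat list mode remains the dependable path. A minimal sketch, adapting the usage example that was removed from the class docstring above; the constructor signature is assumed from that removed example, the default mode is assumed to be the flat one, and passing `categories` as a plain list of names is inferred from the `Models.create_dynamic_model(categories)` call in the flat branch below:

```python
import asyncio

from openai import AsyncOpenAI

async def main() -> None:
    client = AsyncOpenAI()  # reads credentials from the environment
    tool = AsyncTheTool(client, model="model-name")  # signature assumed from the removed docstring
    # Flat mode: a plain list of category names rather than a category tree
    output = await tool.categorize(
        "Quarterly revenue grew 12%.", categories=["finance", "sports"]
    )
    print(output.result, output.errors)

asyncio.run(main())
```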
@@ -66,42 +67,99 @@ class AsyncTheTool:
             - processed_at (datetime): Timestamp when the processing occurred
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occurred during tool call
+
         """
-        start = datetime.now()
-
-        if mode == "category_tree":
-            # Initializations
-            output = Models.ToolOutput()
-            levels = categories.get_level_count()
-            parent_id = 0
-            final_output = []
-
-            for _ in range(levels):
-                # Get child nodes for current parent
-                parent_node = categories.get_node(parent_id)
-                children = categories.get_children(parent_node)
-
-                # Check if child nodes exist
-                if not children:
-                    output.errors.append(
-                        f"No categories found for parent_id {parent_id} in the tree"
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+
+            if mode == "category_tree":
+                # Initializations
+                output = Models.ToolOutput()
+                levels = categories.get_level_count()
+                parent_id = 0
+                final_output = []
+
+                for _ in range(levels):
+                    # Get child nodes for current parent
+                    parent_node = categories.get_node(parent_id)
+                    children = categories.get_children(parent_node)
+
+                    # Check if child nodes exist
+                    if not children:
+                        output.errors.append(
+                            f"No categories found for parent_id {parent_id} in the tree"
+                        )
+                        end = datetime.now()
+                        output.execution_time = (end - start).total_seconds()
+                        return output
+
+                    # Extract category names and descriptions
+                    category_list = [
+                        f"Category Name: {node.name}, Description: {node.description}"
+                        for node in children
+                    ]
+                    category_names = [node.name for node in children]
+
+                    # Run categorization for this level
+                    level_output = await self._operator.run(
+                        # User parameters
+                        text=text,
+                        category_list=category_list,
+                        with_analysis=with_analysis,
+                        user_prompt=user_prompt,
+                        temperature=temperature,
+                        logprobs=logprobs,
+                        top_logprobs=top_logprobs,
+                        mode=mode,
+                        validator=validator,
+                        max_validation_retries=max_validation_retries,
+                        priority=priority,
+                        # Internal parameters
+                        prompt_file="categorize.yaml",
+                        output_model=Models.create_dynamic_model(category_names),
+                        output_lang=None,
                     )
-                    end = datetime.now()
-                    output.execution_time = (end - start).total_seconds()
-                    return output
-
-                # Extract category names and descriptions
-                category_list = [
-                    f"Category Name: {node.name}, Description: {node.description}"
-                    for node in children
-                ]
-                category_names = [node.name for node in children]
-
-                # Run categorization for this level
-                level_output = await self._operator.run(
+
+                    # Check for errors from operator
+                    if level_output.errors:
+                        output.errors.extend(level_output.errors)
+                        end = datetime.now()
+                        output.execution_time = (end - start).total_seconds()
+                        return output
+
+                    # Get the chosen category
+                    chosen_category = level_output.result
+
+                    # Find the corresponding node
+                    parent_node = categories.get_node(chosen_category)
+                    if parent_node is None:
+                        output.errors.append(
+                            f"Category '{chosen_category}' not found in tree after selection"
+                        )
+                        end = datetime.now()
+                        output.execution_time = (end - start).total_seconds()
+                        return output
+
+                    parent_id = parent_node.node_id
+                    final_output.append(parent_node.name)
+
+                # Copy analysis/logprobs/process from the last level's output
+                output.analysis = level_output.analysis
+                output.logprobs = level_output.logprobs
+                output.process = level_output.process
+
+                output.result = final_output
+                end = datetime.now()
+                output.execution_time = (end - start).total_seconds()
+                return output
+
+            else:
+                output = await self._operator.run(
                     # User parameters
                     text=text,
-                    category_list=category_list,
+                    category_list=categories,
                     with_analysis=with_analysis,
                     user_prompt=user_prompt,
                     temperature=temperature,
@@ -110,66 +168,28 @@ class AsyncTheTool:
                     mode=mode,
                     validator=validator,
                     max_validation_retries=max_validation_retries,
+                    priority=priority,
                     # Internal parameters
                     prompt_file="categorize.yaml",
-                    output_model=Models.create_dynamic_model(category_names),
+                    output_model=Models.create_dynamic_model(categories),
                     output_lang=None,
                 )
+                end = datetime.now()
+                output.execution_time = (end - start).total_seconds()
+                return output
 
-                # Check for errors from operator
-                if level_output.errors:
-                    output.errors.extend(level_output.errors)
-                    end = datetime.now()
-                    output.execution_time = (end - start).total_seconds()
-                    return output
-
-                # Get the chosen category
-                chosen_category = level_output.result
-
-                # Find the corresponding node
-                parent_node = categories.get_node(chosen_category)
-                if parent_node is None:
-                    output.errors.append(
-                        f"Category '{chosen_category}' not found in tree after selection"
-                    )
-                    end = datetime.now()
-                    output.execution_time = (end - start).total_seconds()
-                    return output
-
-                parent_id = parent_node.node_id
-                final_output.append(parent_node.name)
-
-            # Copy analysis/logprobs/process from the last level's output
-            output.analysis = level_output.analysis
-            output.logprobs = level_output.logprobs
-            output.process = level_output.process
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
 
-            output.result = final_output
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
-
-        else:
-            output = await self._operator.run(
-                # User parameters
-                text=text,
-                category_list=categories,
-                with_analysis=with_analysis,
-                user_prompt=user_prompt,
-                temperature=temperature,
-                logprobs=logprobs,
-                top_logprobs=top_logprobs,
-                mode=mode,
-                validator=validator,
-                max_validation_retries=max_validation_retries,
-                # Internal parameters
-                prompt_file="categorize.yaml",
-                output_model=Models.create_dynamic_model(categories),
-                output_lang=None,
-            )
-            end = datetime.now()
-            output.execution_time = (end - start).total_seconds()
-            return output
+        return output
 
     async def extract_keywords(
         self,
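The hunk above establishes the pattern repeated for every tool in 1.1.19: pre-allocate `Models.ToolOutput()`, wrap the body in `try`, and convert `PromptError`, `LLMError`, `ValidationError`, `TextToolsError`, and bare `Exception` into entries in `output.errors` rather than letting them propagate. Callers should therefore inspect `errors` instead of relying on exceptions. A hedged sketch, inside an async context with `tool` constructed as in the earlier example:

```python
# Failures now surface in output.errors; in 1.1.17 many of them raised instead.
output = await tool.categorize("some text ...", categories=["a", "b"])
if output.errors:
    print("tool call failed:", output.errors)
else:
    print("chosen category:", output.result)
```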
@@ -211,27 +231,43 @@ class AsyncTheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occurred during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            mode=mode,
-            number_of_keywords=number_of_keywords,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="extract_keywords.yaml",
-            output_model=Models.ListStrOutput,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                mode=mode,
+                number_of_keywords=number_of_keywords,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="extract_keywords.yaml",
+                output_model=Models.ListStrOutput,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def extract_entities(
@@ -272,26 +308,42 @@ class AsyncTheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occurred during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="extract_entities.yaml",
-            output_model=Models.ListDictStrStrOutput,
-            mode=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="extract_entities.yaml",
+                output_model=Models.ListDictStrStrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def is_question(
@@ -330,26 +382,42 @@ class AsyncTheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occurred during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            with_analysis=with_analysis,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="is_question.yaml",
-            output_model=Models.BoolOutput,
-            mode=None,
-            output_lang=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="is_question.yaml",
+                output_model=Models.BoolOutput,
+                mode=None,
+                output_lang=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def text_to_question(
@@ -390,26 +458,42 @@ class AsyncTheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occurred during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="text_to_question.yaml",
-            output_model=Models.StrOutput,
-            mode=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="text_to_question.yaml",
+                output_model=Models.StrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def merge_questions(
@@ -452,27 +536,43 @@ class AsyncTheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occurred during tool call
         """
-        start = datetime.now()
-        text_combined = ", ".join(text)
-        output = await self._operator.run(
-            # User parameters
-            text=text_combined,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="merge_questions.yaml",
-            output_model=Models.StrOutput,
-            mode=mode,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            text = ", ".join(text)
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="merge_questions.yaml",
+                output_model=Models.StrOutput,
+                mode=mode,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def rewrite(
@@ -515,26 +615,42 @@ class AsyncTheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occurred during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="rewrite.yaml",
-            output_model=Models.StrOutput,
-            mode=mode,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="rewrite.yaml",
+                output_model=Models.StrOutput,
+                mode=mode,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def subject_to_question(
@@ -577,27 +693,43 @@ class AsyncTheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occurred during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            number_of_questions=number_of_questions,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="subject_to_question.yaml",
-            output_model=Models.ReasonListStrOutput,
-            mode=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                number_of_questions=number_of_questions,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="subject_to_question.yaml",
+                output_model=Models.ReasonListStrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def summarize(
@@ -638,26 +770,42 @@ class AsyncTheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occurred during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="summarize.yaml",
-            output_model=Models.StrOutput,
-            mode=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="summarize.yaml",
+                output_model=Models.StrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def translate(
@@ -676,6 +824,8 @@ class AsyncTheTool:
         """
         Translate text between languages.
 
+        Important Note: This tool is EXPERIMENTAL; you can use it, but it isn't reliable.
+
         Arguments:
             text: The input text to translate
             target_language: The target language for translation
@@ -698,30 +848,46 @@ class AsyncTheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occurred during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            target_language=target_language,
-            with_analysis=with_analysis,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="translate.yaml",
-            output_model=Models.StrOutput,
-            mode=None,
-            output_lang=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                target_language=target_language,
+                with_analysis=with_analysis,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="translate.yaml",
+                output_model=Models.StrOutput,
+                mode=None,
+                output_lang=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
-    async def detect_entity(
+    async def propositionize(
         self,
         text: str,
         with_analysis: bool = False,
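Note the renames here and in the following hunks: `detect_entity` (with its `EntityDetectorOutput` result and entity prompt) is removed outright, `propositionize` keeps its old body under this slot, and `check_fact` appears further down as a new tool. Code still calling `detect_entity` will now fail with an `AttributeError`, while `extract_entities` remains the supported entity tool. A hedged migration sketch, assuming only the removals and renames visible in this diff:

```python
# 1.1.17:
#   output = await tool.detect_entity("Tehran is the capital of Iran.")
# 1.1.19: use extract_entities for entities...
entities = await tool.extract_entities("Tehran is the capital of Iran.")
# ...and propositionize to split text into standalone propositions (list[str]).
props = await tool.propositionize("Tehran is the capital of Iran and a large city.")
print(entities.result, props.result)
```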
@@ -735,7 +901,9 @@ class AsyncTheTool:
         priority: int | None = 0,
     ) -> Models.ToolOutput:
         """
-        Detects entities in a given text based on the entity_detector.yaml prompt.
+        Break the input text into meaningful propositions.
+
+        Important Note: This tool is EXPERIMENTAL; you can use it, but it isn't reliable.
 
         Arguments:
             text: The input text
@@ -751,7 +919,7 @@ class AsyncTheTool:
 
         Returns:
             ToolOutput: Object containing:
-            - result (list[Entity]): The entities
+            - result (list[str]): The propositions
            - logprobs (list | None): Probability data if logprobs enabled
            - analysis (str | None): Detailed reasoning if with_analysis enabled
            - process (str | None): Description of the process used
@@ -759,31 +927,48 @@ class AsyncTheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occurred during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="detect_entity.yaml",
-            output_model=Models.EntityDetectorOutput,
-            mode=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="propositionize.yaml",
+                output_model=Models.ListStrOutput,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
-    async def propositionize(
+    async def check_fact(
         self,
         text: str,
+        source_text: str,
         with_analysis: bool = False,
         output_lang: str | None = None,
         user_prompt: str | None = None,
@@ -795,10 +980,13 @@ class AsyncTheTool:
         priority: int | None = 0,
     ) -> Models.ToolOutput:
         """
-        Proposition input text to meaningful sentences.
+        Checks whether a statement is relevant to the source text.
+
+        Important Note: This tool is EXPERIMENTAL; you can use it, but it isn't reliable.
 
         Arguments:
             text: The input text
+            source_text: The source text against which the statement is checked
             with_analysis: Whether to include detailed reasoning analysis
             output_lang: Language for the output summary
             user_prompt: Additional instructions for summarization
@@ -811,7 +999,7 @@ class AsyncTheTool:
 
         Returns:
             ToolOutput: Object containing:
-            - result (list[str]): The propositions
+            - result (bool): Whether the statement is relevant to the source text
            - logprobs (list | None): Probability data if logprobs enabled
            - analysis (str | None): Detailed reasoning if with_analysis enabled
            - process (str | None): Description of the process used
@@ -819,32 +1007,50 @@ class AsyncTheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occurred during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User parameters
-            text=text,
-            with_analysis=with_analysis,
-            output_lang=output_lang,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="propositionize.yaml",
-            output_model=Models.ListStrOutput,
-            mode=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                with_analysis=with_analysis,
+                output_lang=output_lang,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="check_fact.yaml",
+                output_model=Models.BoolOutput,
+                mode=None,
+                source_text=source_text,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
 
     async def run_custom(
         self,
         prompt: str,
         output_model: Any,
+        with_analysis: bool = False,
+        analyze_template: str | None = None,
         output_lang: str | None = None,
         temperature: float | None = None,
         logprobs: bool | None = None,
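The new `check_fact` passes `source_text` straight through to the `check_fact.yaml` prompt and returns a boolean result via `Models.BoolOutput`. A minimal sketch, with argument semantics inferred from the docstring above, run inside an async context with `tool` constructed as earlier:

```python
output = await tool.check_fact(
    text="The Eiffel Tower is in Berlin.",        # the statement to verify
    source_text="The Eiffel Tower is in Paris.",  # the reference text
)
if not output.errors:
    print(output.result)  # expected: False
```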
@@ -856,8 +1062,13 @@ class AsyncTheTool:
         """
         Custom tool that can do almost anything!
 
+        Important Note: This tool is EXPERIMENTAL; you can use it, but it isn't reliable.
+
         Arguments:
-            text: The user prompt
+            prompt: The user prompt
+            output_model: Pydantic BaseModel used for structured output
+            with_analysis: Whether to include detailed reasoning analysis
+            analyze_template: The analyze template used for reasoning analysis
             output_lang: Language for the output summary
             temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
             logprobs: Whether to return token probability information
@@ -876,25 +1087,42 @@ class AsyncTheTool:
             - execution_time (float): Time taken for execution in seconds (-1.0 if not measured)
             - errors (list(str) | None): Errors occurred during tool call
         """
-        start = datetime.now()
-        output = await self._operator.run(
-            # User paramaeters
-            text=prompt,
-            output_model=output_model,
-            output_model_str=output_model.model_json_schema(),
-            output_lang=output_lang,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            priority=priority,
-            # Internal parameters
-            prompt_file="run_custom.yaml",
-            user_prompt=None,
-            with_analysis=False,
-            mode=None,
-        )
-        end = datetime.now()
-        output.execution_time = (end - start).total_seconds()
+        output = Models.ToolOutput()
+
+        try:
+            start = datetime.now()
+            output = await self._operator.run(
+                # User parameters
+                text=prompt,
+                output_model=output_model,
+                with_analysis=with_analysis,
+                analyze_template=analyze_template,
+                output_model_str=output_model.model_json_schema(),
+                output_lang=output_lang,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                priority=priority,
+                # Internal parameters
+                prompt_file="run_custom.yaml",
+                user_prompt=None,
+                mode=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        except PromptError as e:
+            output.errors.append(f"Prompt error: {e}")
+        except LLMError as e:
+            output.errors.append(f"LLM error: {e}")
+        except ValidationError as e:
+            output.errors.append(f"Validation error: {e}")
+        except TextToolsError as e:
+            output.errors.append(f"TextTools error: {e}")
+        except Exception as e:
+            output.errors.append(f"Unexpected error: {e}")
+
         return output
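`run_custom` now accepts `with_analysis` and `analyze_template` instead of hard-coding `with_analysis=False`. A closing sketch of the expanded call, using a hypothetical Pydantic model for the structured output and the `tool` instance from the earlier examples:

```python
from pydantic import BaseModel

class Sentiment(BaseModel):  # hypothetical output model, for illustration only
    label: str
    confidence: float

output = await tool.run_custom(
    prompt="Classify the sentiment of: 'I love this library!'",
    output_model=Sentiment,
    with_analysis=True,  # newly supported in 1.1.19
)
print(output.result, output.analysis)
```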