hamtaa-texttools 1.1.21__py3-none-any.whl → 1.1.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  import sys
2
2
  from time import perf_counter
3
- from typing import Literal
3
+ from typing import Literal, Any
4
4
  from collections.abc import Callable
5
5
 
6
6
  from openai import AsyncOpenAI
@@ -38,9 +38,9 @@ class AsyncTheTool:
38
38
  temperature: float | None = 0.0,
39
39
  logprobs: bool = False,
40
40
  top_logprobs: int = 3,
41
- validator: Callable[[object], bool] | None = None,
41
+ validator: Callable[[Any], bool] | None = None,
42
42
  max_validation_retries: int | None = None,
43
- priority: int = 0,
43
+ priority: int | None = None,
44
44
  ) -> Models.ToolOutput:
45
45
  """
46
46
  Categorize a text into a category / category tree.
@@ -48,11 +48,11 @@ class AsyncTheTool:
48
48
  Important Note: category_tree mode is EXPERIMENTAL; you can use it, but it isn't reliable.
49
49
 
50
50
  Arguments:
51
- text: The input text to categorize
51
+ text: The input text
52
52
  categories: The category list / category tree
53
53
  with_analysis: Whether to include detailed reasoning analysis
54
- user_prompt: Additional instructions for the categorization
55
- temperature: Controls randomness
54
+ user_prompt: Additional instructions
55
+ temperature: Controls randomness (0.0 - 2.0)
56
56
  logprobs: Whether to return token probability information
57
57
  top_logprobs: Number of top token alternatives to return if logprobs enabled
58
58
  validator: Custom validation function to validate the output
@@ -64,7 +64,6 @@ class AsyncTheTool:
64
64
 
65
65
  """
66
66
  tool_name = sys._getframe().f_code.co_name
67
- prompt_file = tool_name + ".yaml"
68
67
  start = perf_counter()
69
68
 
70
69
  try:
@@ -82,7 +81,7 @@ class AsyncTheTool:
82
81
  max_validation_retries=max_validation_retries,
83
82
  priority=priority,
84
83
  # Internal parameters
85
- prompt_file=prompt_file,
84
+ tool_name=tool_name,
86
85
  output_model=Models.create_dynamic_model(categories),
87
86
  mode=None,
88
87
  output_lang=None,
@@ -128,7 +127,7 @@ class AsyncTheTool:
128
127
  max_validation_retries=max_validation_retries,
129
128
  priority=priority,
130
129
  # Internal parameters
131
- prompt_file=prompt_file,
130
+ tool_name=tool_name,
132
131
  output_model=Models.create_dynamic_model(category_names),
133
132
  mode=None,
134
133
  output_lang=None,
@@ -174,19 +173,19 @@ class AsyncTheTool:
174
173
  top_logprobs: int = 3,
175
174
  mode: Literal["auto", "threshold", "count"] = "auto",
176
175
  number_of_keywords: int | None = None,
177
- validator: Callable[[object], bool] | None = None,
176
+ validator: Callable[[Any], bool] | None = None,
178
177
  max_validation_retries: int | None = None,
179
- priority: int | None = 0,
178
+ priority: int | None = None,
180
179
  ) -> Models.ToolOutput:
181
180
  """
182
181
  Extract salient keywords from text.
183
182
 
184
183
  Arguments:
185
- text: The input text to extract keywords from
184
+ text: The input text
186
185
  with_analysis: Whether to include detailed reasoning analysis
187
- output_lang: Language for the output response
188
- user_prompt: Additional instructions for keyword extraction
189
- temperature: Controls randomness
186
+ output_lang: Language for the output
187
+ user_prompt: Additional instructions
188
+ temperature: Controls randomness (0.0 - 2.0)
190
189
  logprobs: Whether to return token probability information
191
190
  top_logprobs: Number of top token alternatives to return if logprobs enabled
192
191
  validator: Custom validation function to validate the output
@@ -197,7 +196,6 @@ class AsyncTheTool:
197
196
  ToolOutput
198
197
  """
199
198
  tool_name = sys._getframe().f_code.co_name
200
- prompt_file = tool_name + ".yaml"
201
199
  start = perf_counter()
202
200
 
203
201
  try:
@@ -216,7 +214,7 @@ class AsyncTheTool:
216
214
  max_validation_retries=max_validation_retries,
217
215
  priority=priority,
218
216
  # Internal parameters
219
- prompt_file=prompt_file,
217
+ tool_name=tool_name,
220
218
  output_model=Models.ListStr,
221
219
  )
222
220
 
@@ -248,20 +246,20 @@ class AsyncTheTool:
248
246
  temperature: float | None = 0.0,
249
247
  logprobs: bool = False,
250
248
  top_logprobs: int = 3,
251
- validator: Callable[[object], bool] | None = None,
249
+ validator: Callable[[Any], bool] | None = None,
252
250
  max_validation_retries: int | None = None,
253
- priority: int | None = 0,
251
+ priority: int | None = None,
254
252
  ) -> Models.ToolOutput:
255
253
  """
256
254
  Perform Named Entity Recognition (NER) over the input text.
257
255
 
258
256
  Arguments:
259
- text: The input text to extract entities from
257
+ text: The input text
260
258
  entities: List of entities provided by user (Optional)
261
259
  with_analysis: Whether to include detailed reasoning analysis
262
- output_lang: Language for the output response
263
- user_prompt: Additional instructions for entity extraction
264
- temperature: Controls randomness
260
+ output_lang: Language for the output
261
+ user_prompt: Additional instructions
262
+ temperature: Controls randomness (0.0 - 2.0)
265
263
  logprobs: Whether to return token probability information
266
264
  top_logprobs: Number of top token alternatives to return if logprobs enabled
267
265
  validator: Custom validation function to validate the output
@@ -272,7 +270,6 @@ class AsyncTheTool:
272
270
  ToolOutput
273
271
  """
274
272
  tool_name = sys._getframe().f_code.co_name
275
- prompt_file = tool_name + ".yaml"
276
273
  start = perf_counter()
277
274
 
278
275
  try:
@@ -291,7 +288,7 @@ class AsyncTheTool:
291
288
  max_validation_retries=max_validation_retries,
292
289
  priority=priority,
293
290
  # Internal parameters
294
- prompt_file=prompt_file,
291
+ tool_name=tool_name,
295
292
  output_model=Models.ListDictStrStr,
296
293
  mode=None,
297
294
  )
@@ -322,18 +319,18 @@ class AsyncTheTool:
322
319
  temperature: float | None = 0.0,
323
320
  logprobs: bool = False,
324
321
  top_logprobs: int = 3,
325
- validator: Callable[[object], bool] | None = None,
322
+ validator: Callable[[Any], bool] | None = None,
326
323
  max_validation_retries: int | None = None,
327
- priority: int | None = 0,
324
+ priority: int | None = None,
328
325
  ) -> Models.ToolOutput:
329
326
  """
330
327
  Detect if the input is phrased as a question.
331
328
 
332
329
  Arguments:
333
- text: The input text to analyze
330
+ text: The input text
334
331
  with_analysis: Whether to include detailed reasoning analysis
335
- user_prompt: Additional instructions for question detection
336
- temperature: Controls randomness
332
+ user_prompt: Additional instructions
333
+ temperature: Controls randomness (0.0 - 2.0)
337
334
  logprobs: Whether to return token probability information
338
335
  top_logprobs: Number of top token alternatives to return if logprobs enabled
339
336
  validator: Custom validation function to validate the output
@@ -344,7 +341,7 @@ class AsyncTheTool:
344
341
  ToolOutput
345
342
  """
346
343
  tool_name = sys._getframe().f_code.co_name
347
- prompt_file = tool_name + ".yaml"
344
+
348
345
  start = perf_counter()
349
346
 
350
347
  try:
@@ -360,7 +357,7 @@ class AsyncTheTool:
360
357
  max_validation_retries=max_validation_retries,
361
358
  priority=priority,
362
359
  # Internal parameters
363
- prompt_file=prompt_file,
360
+ tool_name=tool_name,
364
361
  output_model=Models.Bool,
365
362
  mode=None,
366
363
  output_lang=None,
@@ -394,20 +391,20 @@ class AsyncTheTool:
394
391
  temperature: float | None = 0.0,
395
392
  logprobs: bool = False,
396
393
  top_logprobs: int = 3,
397
- validator: Callable[[object], bool] | None = None,
394
+ validator: Callable[[Any], bool] | None = None,
398
395
  max_validation_retries: int | None = None,
399
- priority: int | None = 0,
396
+ priority: int | None = None,
400
397
  ) -> Models.ToolOutput:
401
398
  """
402
399
  Generate a single question from the given text.
403
400
 
404
401
  Arguments:
405
- text: The input text to generate a question from
402
+ text: The input text
406
403
  number_of_questions: Number of questions to generate
407
404
  with_analysis: Whether to include detailed reasoning analysis
408
- output_lang: Language for the output question
409
- user_prompt: Additional instructions for question generation
410
- temperature: Controls randomness
405
+ output_lang: Language for the output
406
+ user_prompt: Additional instructions
407
+ temperature: Controls randomness (0.0 - 2.0)
411
408
  logprobs: Whether to return token probability information
412
409
  top_logprobs: Number of top token alternatives to return if logprobs enabled
413
410
  validator: Custom validation function to validate the output
@@ -418,7 +415,6 @@ class AsyncTheTool:
418
415
  ToolOutput
419
416
  """
420
417
  tool_name = sys._getframe().f_code.co_name
421
- prompt_file = tool_name + ".yaml"
422
418
  start = perf_counter()
423
419
 
424
420
  try:
@@ -436,7 +432,7 @@ class AsyncTheTool:
436
432
  max_validation_retries=max_validation_retries,
437
433
  priority=priority,
438
434
  # Internal parameters
439
- prompt_file=prompt_file,
435
+ tool_name=tool_name,
440
436
  output_model=Models.ReasonListStr,
441
437
  mode=None,
442
438
  )
@@ -469,9 +465,9 @@ class AsyncTheTool:
469
465
  logprobs: bool = False,
470
466
  top_logprobs: int = 3,
471
467
  mode: Literal["default", "reason"] = "default",
472
- validator: Callable[[object], bool] | None = None,
468
+ validator: Callable[[Any], bool] | None = None,
473
469
  max_validation_retries: int | None = None,
474
- priority: int | None = 0,
470
+ priority: int | None = None,
475
471
  ) -> Models.ToolOutput:
476
472
  """
477
473
  Merge multiple questions into a single unified question.
@@ -479,12 +475,11 @@ class AsyncTheTool:
479
475
  Arguments:
480
476
  text: List of questions to merge
481
477
  with_analysis: Whether to include detailed reasoning analysis
482
- output_lang: Language for the output merged question
483
- user_prompt: Additional instructions for question merging
484
- temperature: Controls randomness
478
+ output_lang: Language for the output
479
+ user_prompt: Additional instructions
480
+ temperature: Controls randomness (0.0 - 2.0)
485
481
  logprobs: Whether to return token probability information
486
482
  top_logprobs: Number of top token alternatives to return if logprobs enabled
487
- mode: Merging strategy - 'default' for direct merge, 'reason' for reasoned merge
488
483
  validator: Custom validation function to validate the output
489
484
  max_validation_retries: Maximum number of retry attempts if validation fails
490
485
  priority: Task execution priority (if enabled by vLLM and the model)
@@ -493,7 +488,6 @@ class AsyncTheTool:
493
488
  ToolOutput
494
489
  """
495
490
  tool_name = sys._getframe().f_code.co_name
496
- prompt_file = tool_name + ".yaml"
497
491
  start = perf_counter()
498
492
 
499
493
  try:
@@ -511,7 +505,7 @@ class AsyncTheTool:
511
505
  max_validation_retries=max_validation_retries,
512
506
  priority=priority,
513
507
  # Internal parameters
514
- prompt_file=prompt_file,
508
+ tool_name=tool_name,
515
509
  output_model=Models.Str,
516
510
  mode=mode,
517
511
  )
@@ -544,22 +538,21 @@ class AsyncTheTool:
544
538
  logprobs: bool = False,
545
539
  top_logprobs: int = 3,
546
540
  mode: Literal["positive", "negative", "hard_negative"] = "positive",
547
- validator: Callable[[object], bool] | None = None,
541
+ validator: Callable[[Any], bool] | None = None,
548
542
  max_validation_retries: int | None = None,
549
- priority: int | None = 0,
543
+ priority: int | None = None,
550
544
  ) -> Models.ToolOutput:
551
545
  """
552
546
  Rewrite a text with different modes.
553
547
 
554
548
  Arguments:
555
- text: The input text to rewrite
549
+ text: The input text
556
550
  with_analysis: Whether to include detailed reasoning analysis
557
- output_lang: Language for the output rewritten text
558
- user_prompt: Additional instructions for rewriting
559
- temperature: Controls randomness
551
+ output_lang: Language for the output
552
+ user_prompt: Additional instructions
553
+ temperature: Controls randomness (0.0 - 2.0)
560
554
  logprobs: Whether to return token probability information
561
555
  top_logprobs: Number of top token alternatives to return if logprobs enabled
562
- mode: Rewriting mode - 'positive', 'negative', or 'hard_negative'
563
556
  validator: Custom validation function to validate the output
564
557
  max_validation_retries: Maximum number of retry attempts if validation fails
565
558
  priority: Task execution priority (if enabled by vLLM and the model)
@@ -568,7 +561,6 @@ class AsyncTheTool:
568
561
  ToolOutput
569
562
  """
570
563
  tool_name = sys._getframe().f_code.co_name
571
- prompt_file = tool_name + ".yaml"
572
564
  start = perf_counter()
573
565
 
574
566
  try:
@@ -585,7 +577,7 @@ class AsyncTheTool:
585
577
  max_validation_retries=max_validation_retries,
586
578
  priority=priority,
587
579
  # Internal parameters
588
- prompt_file=prompt_file,
580
+ tool_name=tool_name,
589
581
  output_model=Models.Str,
590
582
  mode=mode,
591
583
  )
@@ -618,9 +610,9 @@ class AsyncTheTool:
618
610
  temperature: float | None = 0.0,
619
611
  logprobs: bool = False,
620
612
  top_logprobs: int = 3,
621
- validator: Callable[[object], bool] | None = None,
613
+ validator: Callable[[Any], bool] | None = None,
622
614
  max_validation_retries: int | None = None,
623
- priority: int | None = 0,
615
+ priority: int | None = None,
624
616
  ) -> Models.ToolOutput:
625
617
  """
626
618
  Generate a list of questions about a subject.
@@ -629,9 +621,9 @@ class AsyncTheTool:
629
621
  text: The subject text to generate questions about
630
622
  number_of_questions: Number of questions to generate
631
623
  with_analysis: Whether to include detailed reasoning analysis
632
- output_lang: Language for the output questions
633
- user_prompt: Additional instructions for question generation
634
- temperature: Controls randomness
624
+ output_lang: Language for the output
625
+ user_prompt: Additional instructions
626
+ temperature: Controls randomness (0.0 - 2.0)
635
627
  logprobs: Whether to return token probability information
636
628
  top_logprobs: Number of top token alternatives to return if logprobs enabled
637
629
  validator: Custom validation function to validate the output
@@ -642,7 +634,6 @@ class AsyncTheTool:
642
634
  ToolOutput
643
635
  """
644
636
  tool_name = sys._getframe().f_code.co_name
645
- prompt_file = tool_name + ".yaml"
646
637
  start = perf_counter()
647
638
 
648
639
  try:
@@ -660,7 +651,7 @@ class AsyncTheTool:
660
651
  max_validation_retries=max_validation_retries,
661
652
  priority=priority,
662
653
  # Internal parameters
663
- prompt_file=prompt_file,
654
+ tool_name=tool_name,
664
655
  output_model=Models.ReasonListStr,
665
656
  mode=None,
666
657
  )
@@ -692,19 +683,19 @@ class AsyncTheTool:
692
683
  temperature: float | None = 0.0,
693
684
  logprobs: bool = False,
694
685
  top_logprobs: int = 3,
695
- validator: Callable[[object], bool] | None = None,
686
+ validator: Callable[[Any], bool] | None = None,
696
687
  max_validation_retries: int | None = None,
697
- priority: int | None = 0,
688
+ priority: int | None = None,
698
689
  ) -> Models.ToolOutput:
699
690
  """
700
691
  Summarize the given subject text.
701
692
 
702
693
  Arguments:
703
- text: The input text to summarize
694
+ text: The input text
704
695
  with_analysis: Whether to include detailed reasoning analysis
705
- output_lang: Language for the output summary
706
- user_prompt: Additional instructions for summarization
707
- temperature: Controls randomness
696
+ output_lang: Language for the output
697
+ user_prompt: Additional instructions
698
+ temperature: Controls randomness (0.0 - 2.0)
708
699
  logprobs: Whether to return token probability information
709
700
  top_logprobs: Number of top token alternatives to return if logprobs enabled
710
701
  validator: Custom validation function to validate the output
@@ -715,7 +706,6 @@ class AsyncTheTool:
715
706
  ToolOutput
716
707
  """
717
708
  tool_name = sys._getframe().f_code.co_name
718
- prompt_file = tool_name + ".yaml"
719
709
  start = perf_counter()
720
710
 
721
711
  try:
@@ -732,7 +722,7 @@ class AsyncTheTool:
732
722
  max_validation_retries=max_validation_retries,
733
723
  priority=priority,
734
724
  # Internal parameters
735
- prompt_file=prompt_file,
725
+ tool_name=tool_name,
736
726
  output_model=Models.Str,
737
727
  mode=None,
738
728
  )
@@ -765,9 +755,9 @@ class AsyncTheTool:
765
755
  temperature: float | None = 0.0,
766
756
  logprobs: bool = False,
767
757
  top_logprobs: int = 3,
768
- validator: Callable[[object], bool] | None = None,
758
+ validator: Callable[[Any], bool] | None = None,
769
759
  max_validation_retries: int | None = None,
770
- priority: int | None = 0,
760
+ priority: int | None = None,
771
761
  ) -> Models.ToolOutput:
772
762
  """
773
763
  Translate text between languages.
@@ -775,12 +765,12 @@ class AsyncTheTool:
775
765
  Important Note: This tool is EXPERIMENTAL; you can use it, but it isn't reliable.
776
766
 
777
767
  Arguments:
778
- text: The input text to translate
768
+ text: The input text
779
769
  target_language: The target language for translation
780
770
  use_chunker: Whether to use the text chunker when the text length exceeds 1500
781
771
  with_analysis: Whether to include detailed reasoning analysis
782
- user_prompt: Additional instructions for translation
783
- temperature: Controls randomness
772
+ user_prompt: Additional instructions
773
+ temperature: Controls randomness (0.0 - 2.0)
784
774
  logprobs: Whether to return token probability information
785
775
  top_logprobs: Number of top token alternatives to return if logprobs enabled
786
776
  validator: Custom validation function to validate the output
@@ -791,7 +781,6 @@ class AsyncTheTool:
791
781
  ToolOutput
792
782
  """
793
783
  tool_name = sys._getframe().f_code.co_name
794
- prompt_file = tool_name + ".yaml"
795
784
  start = perf_counter()
796
785
 
797
786
  try:
@@ -815,7 +804,7 @@ class AsyncTheTool:
815
804
  max_validation_retries=max_validation_retries,
816
805
  priority=priority,
817
806
  # Internal parameters
818
- prompt_file=prompt_file,
807
+ tool_name=tool_name,
819
808
  output_model=Models.Str,
820
809
  mode=None,
821
810
  output_lang=None,
@@ -852,7 +841,7 @@ class AsyncTheTool:
852
841
  max_validation_retries=max_validation_retries,
853
842
  priority=priority,
854
843
  # Internal parameters
855
- prompt_file=prompt_file,
844
+ tool_name=tool_name,
856
845
  output_model=Models.Str,
857
846
  mode=None,
858
847
  output_lang=None,
@@ -885,9 +874,9 @@ class AsyncTheTool:
885
874
  temperature: float | None = 0.0,
886
875
  logprobs: bool = False,
887
876
  top_logprobs: int = 3,
888
- validator: Callable[[object], bool] | None = None,
877
+ validator: Callable[[Any], bool] | None = None,
889
878
  max_validation_retries: int | None = None,
890
- priority: int | None = 0,
879
+ priority: int | None = None,
891
880
  ) -> Models.ToolOutput:
892
881
  """
893
882
  Decompose the input text into meaningful propositional sentences.
@@ -897,9 +886,9 @@ class AsyncTheTool:
897
886
  Arguments:
898
887
  text: The input text
899
888
  with_analysis: Whether to include detailed reasoning analysis
900
- output_lang: Language for the output summary
901
- user_prompt: Additional instructions for summarization
902
- temperature: Controls randomness
889
+ output_lang: Language for the output
890
+ user_prompt: Additional instructions
891
+ temperature: Controls randomness (0.0 - 2.0)
903
892
  logprobs: Whether to return token probability information
904
893
  top_logprobs: Number of top token alternatives to return if logprobs enabled
905
894
  validator: Custom validation function to validate the output
@@ -910,7 +899,6 @@ class AsyncTheTool:
910
899
  ToolOutput
911
900
  """
912
901
  tool_name = sys._getframe().f_code.co_name
913
- prompt_file = tool_name + ".yaml"
914
902
  start = perf_counter()
915
903
 
916
904
  try:
@@ -927,7 +915,7 @@ class AsyncTheTool:
927
915
  max_validation_retries=max_validation_retries,
928
916
  priority=priority,
929
917
  # Internal parameters
930
- prompt_file=prompt_file,
918
+ tool_name=tool_name,
931
919
  output_model=Models.ListStr,
932
920
  mode=None,
933
921
  )
@@ -960,9 +948,9 @@ class AsyncTheTool:
960
948
  temperature: float | None = 0.0,
961
949
  logprobs: bool = False,
962
950
  top_logprobs: int = 3,
963
- validator: Callable[[object], bool] | None = None,
951
+ validator: Callable[[Any], bool] | None = None,
964
952
  max_validation_retries: int | None = None,
965
- priority: int | None = 0,
953
+ priority: int | None = None,
966
954
  ) -> Models.ToolOutput:
967
955
  """
968
956
  Checks whether a statement is relevant to the source text or not.
@@ -973,9 +961,9 @@ class AsyncTheTool:
973
961
  text: The input text
974
962
  source_text: The source text against which the statement's relevance is checked
975
963
  with_analysis: Whether to include detailed reasoning analysis
976
- output_lang: Language for the output summary
977
- user_prompt: Additional instructions for summarization
978
- temperature: Controls randomness
964
+ output_lang: Language for the output
965
+ user_prompt: Additional instructions
966
+ temperature: Controls randomness (0.0 - 2.0)
979
967
  logprobs: Whether to return token probability information
980
968
  top_logprobs: Number of top token alternatives to return if logprobs enabled
981
969
  validator: Custom validation function to validate the output
@@ -986,7 +974,6 @@ class AsyncTheTool:
986
974
  ToolOutput
987
975
  """
988
976
  tool_name = sys._getframe().f_code.co_name
989
- prompt_file = tool_name + ".yaml"
990
977
  start = perf_counter()
991
978
 
992
979
  try:
@@ -1003,7 +990,7 @@ class AsyncTheTool:
1003
990
  max_validation_retries=max_validation_retries,
1004
991
  priority=priority,
1005
992
  # Internal parameters
1006
- prompt_file=prompt_file,
993
+ tool_name=tool_name,
1007
994
  output_model=Models.Bool,
1008
995
  mode=None,
1009
996
  source_text=source_text,
@@ -1030,29 +1017,27 @@ class AsyncTheTool:
1030
1017
  async def run_custom(
1031
1018
  self,
1032
1019
  prompt: str,
1033
- output_model: object,
1020
+ output_model: Any,
1034
1021
  with_analysis: bool = False,
1035
1022
  analyze_template: str | None = None,
1036
1023
  output_lang: str | None = None,
1037
1024
  temperature: float | None = None,
1038
1025
  logprobs: bool | None = None,
1039
1026
  top_logprobs: int = 3,
1040
- validator: Callable[[object], bool] | None = None,
1027
+ validator: Callable[[Any], bool] | None = None,
1041
1028
  max_validation_retries: int | None = None,
1042
- priority: int | None = 0,
1029
+ priority: int | None = None,
1043
1030
  ) -> Models.ToolOutput:
1044
1031
  """
1045
1032
  Custom tool that can do almost anything!
1046
1033
 
1047
- Important Note: This tool is EXPERIMENTAL, you can use it but it isn't reliable.
1048
-
1049
1034
  Arguments:
1050
1035
  prompt: The user prompt
1051
1036
  output_model: Pydantic BaseModel used for structured output
1052
1037
  with_analysis: Whether to include detailed reasoning analysis
1053
1038
  analyze_template: The analyze template used for reasoning analysis
1054
- output_lang: Language for the output summary
1055
- temperature: Controls randomness
1039
+ output_lang: Language for the output
1040
+ temperature: Controls randomness (0.0 - 2.0)
1056
1041
  logprobs: Whether to return token probability information
1057
1042
  top_logprobs: Number of top token alternatives to return if logprobs enabled
1058
1043
  validator: Custom validation function to validate the output
@@ -1063,7 +1048,6 @@ class AsyncTheTool:
1063
1048
  ToolOutput
1064
1049
  """
1065
1050
  tool_name = sys._getframe().f_code.co_name
1066
- prompt_file = tool_name + ".yaml"
1067
1051
  start = perf_counter()
1068
1052
 
1069
1053
  try:
@@ -1082,7 +1066,7 @@ class AsyncTheTool:
1082
1066
  max_validation_retries=max_validation_retries,
1083
1067
  priority=priority,
1084
1068
  # Internal parameters
1085
- prompt_file=prompt_file,
1069
+ tool_name=tool_name,
1086
1070
  user_prompt=None,
1087
1071
  mode=None,
1088
1072
  )