hamtaa-texttools 1.1.21__py3-none-any.whl → 1.1.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  import sys
  from time import perf_counter
- from typing import Literal
+ from typing import Literal, Any
  from collections.abc import Callable

  from openai import OpenAI
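The new `Any` import supports a typing fix repeated throughout the file: callable parameters are contravariant, so a user validator written against a concrete output type does not type-check against `Callable[[object], bool]`, while `Callable[[Any], bool]` accepts it. A minimal illustration (the `Result` class below is a hypothetical stand-in, not a texttools model):

```python
from collections.abc import Callable
from typing import Any

class Result:  # hypothetical stand-in for a texttools output model
    category: str

def check(r: Result) -> bool:
    return r.category != ""

# Callable parameters are contravariant, so this fails under mypy/pyright:
old_style: Callable[[object], bool] = check   # error: incompatible type

# With the 1.1.23 annotation the same validator type-checks cleanly:
new_style: Callable[[Any], bool] = check      # OK
```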
@@ -38,9 +38,9 @@ class TheTool:
  temperature: float | None = 0.0,
  logprobs: bool = False,
  top_logprobs: int = 3,
- validator: Callable[[object], bool] | None = None,
+ validator: Callable[[Any], bool] | None = None,
  max_validation_retries: int | None = None,
- priority: int = 0,
+ priority: int | None = None,
  ) -> Models.ToolOutput:
  """
  Categorize a text into a category / category tree.
@@ -48,11 +48,11 @@ class TheTool:
  Important Note: category_tree mode is EXPERIMENTAL, you can use it but it isn't reliable.

  Arguments:
- text: The input text to categorize
+ text: The input text
  categories: The category list / category tree
  with_analysis: Whether to include detailed reasoning analysis
- user_prompt: Additional instructions for the categorization
- temperature: Controls randomness
+ user_prompt: Additional instructions
+ temperature: Controls randomness (0.0 - 2.0)
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
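The same hunk also changes `priority` from `int = 0` to `int | None = None`, so a scheduling priority is presumably only forwarded when the caller sets one. A sketch of how the updated `categorize` signature might be exercised; the `TheTool` construction and the shape of the returned value are assumptions, not shown in this diff:

```python
from typing import Any

from texttools import TheTool  # import path assumed from the package layout

tool = TheTool()  # real constructor arguments (client, model, ...) not shown here

def known_category(result: Any) -> bool:
    # Hypothetical guard: reject anything outside our own label set.
    return getattr(result, "category", None) in {"sports", "politics"}

out = tool.categorize(
    text="The match ended 2-1 after extra time.",
    categories=["sports", "politics"],
    validator=known_category,   # retried on failure, up to max_validation_retries
    max_validation_retries=2,
    priority=1,                 # only meaningful if vLLM priority scheduling is enabled
)
```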
@@ -64,7 +64,6 @@ class TheTool:

  """
  tool_name = sys._getframe().f_code.co_name
- prompt_file = tool_name + ".yaml"
  start = perf_counter()

  try:
@@ -82,7 +81,7 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file=prompt_file,
+ tool_name=tool_name,
  output_model=Models.create_dynamic_model(categories),
  mode=None,
  output_lang=None,
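With `prompt_file` removed from every call site, the methods now forward only `tool_name`, and the operator resolves the prompt file internally. A sketch of the likely resolution; the helper below is hypothetical, and the real logic presumably lives in `texttools/internals` (perhaps `prompt_loader.py`, per the package RECORD):

```python
from pathlib import Path

PROMPTS_DIR = Path("texttools/prompts")  # assumed location, per the package RECORD

def resolve_prompt(tool_name: str) -> str:
    # e.g. tool_name "categorize" -> texttools/prompts/categorize.yaml
    return (PROMPTS_DIR / f"{tool_name}.yaml").read_text(encoding="utf-8")
```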
@@ -128,7 +127,7 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file=prompt_file,
+ tool_name=tool_name,
  output_model=Models.create_dynamic_model(category_names),
  mode=None,
  output_lang=None,
@@ -174,19 +173,19 @@ class TheTool:
  top_logprobs: int = 3,
  mode: Literal["auto", "threshold", "count"] = "auto",
  number_of_keywords: int | None = None,
- validator: Callable[[object], bool] | None = None,
+ validator: Callable[[Any], bool] | None = None,
  max_validation_retries: int | None = None,
- priority: int | None = 0,
+ priority: int | None = None,
  ) -> Models.ToolOutput:
  """
  Extract salient keywords from text.

  Arguments:
- text: The input text to extract keywords from
+ text: The input text
  with_analysis: Whether to include detailed reasoning analysis
- output_lang: Language for the output response
- user_prompt: Additional instructions for keyword extraction
- temperature: Controls randomness
+ output_lang: Language for the output
+ user_prompt: Additional instructions
+ temperature: Controls randomness (0.0 - 2.0)
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
@@ -197,7 +196,6 @@ class TheTool:
  ToolOutput
  """
  tool_name = sys._getframe().f_code.co_name
- prompt_file = tool_name + ".yaml"
  start = perf_counter()

  try:
@@ -216,7 +214,7 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file=prompt_file,
+ tool_name=tool_name,
  output_model=Models.ListStr,
  )

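`extract_keywords` keeps its three-way `mode` switch while picking up the retyped `validator` and the `None` priority default. Continuing the sketch above (reusing the `tool` instance), a hedged call; the field layout of `Models.ListStr` is not visible here, so the validator stays deliberately generic:

```python
out = tool.extract_keywords(
    text="Transformers use self-attention to weigh interactions between tokens.",
    mode="count",                  # Literal["auto", "threshold", "count"]
    number_of_keywords=5,          # presumably only consulted in "count" mode
    validator=lambda result: bool(result),  # hypothetical non-empty check
)
```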
@@ -248,20 +246,20 @@ class TheTool:
  temperature: float | None = 0.0,
  logprobs: bool = False,
  top_logprobs: int = 3,
- validator: Callable[[object], bool] | None = None,
+ validator: Callable[[Any], bool] | None = None,
  max_validation_retries: int | None = None,
- priority: int | None = 0,
+ priority: int | None = None,
  ) -> Models.ToolOutput:
  """
  Perform Named Entity Recognition (NER) over the input text.

  Arguments:
- text: The input text to extract entities from
- entities: List of entities provided by user (Optional)
+ text: The input text
+ entities: List of entities provided by user
  with_analysis: Whether to include detailed reasoning analysis
- output_lang: Language for the output response
- user_prompt: Additional instructions for entity extraction
- temperature: Controls randomness
+ output_lang: Language for the output
+ user_prompt: Additional instructions
+ temperature: Controls randomness (0.0 - 2.0)
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
@@ -272,7 +270,6 @@ class TheTool:
  ToolOutput
  """
  tool_name = sys._getframe().f_code.co_name
- prompt_file = tool_name + ".yaml"
  start = perf_counter()

  try:
@@ -291,7 +288,7 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file=prompt_file,
+ tool_name=tool_name,
  output_model=Models.ListDictStrStr,
  mode=None,
  )
@@ -322,18 +319,18 @@ class TheTool:
  temperature: float | None = 0.0,
  logprobs: bool = False,
  top_logprobs: int = 3,
- validator: Callable[[object], bool] | None = None,
+ validator: Callable[[Any], bool] | None = None,
  max_validation_retries: int | None = None,
- priority: int | None = 0,
+ priority: int | None = None,
  ) -> Models.ToolOutput:
  """
  Detect if the input is phrased as a question.

  Arguments:
- text: The input text to analyze
+ text: The input text
  with_analysis: Whether to include detailed reasoning analysis
- user_prompt: Additional instructions for question detection
- temperature: Controls randomness
+ user_prompt: Additional instructions
+ temperature: Controls randomness (0.0 - 2.0)
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
@@ -344,7 +341,6 @@ class TheTool:
  ToolOutput
  """
  tool_name = sys._getframe().f_code.co_name
- prompt_file = tool_name + ".yaml"
  start = perf_counter()

  try:
@@ -360,7 +356,7 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file=prompt_file,
+ tool_name=tool_name,
  output_model=Models.Bool,
  mode=None,
  output_lang=None,
@@ -394,20 +390,20 @@ class TheTool:
  temperature: float | None = 0.0,
  logprobs: bool = False,
  top_logprobs: int = 3,
- validator: Callable[[object], bool] | None = None,
+ validator: Callable[[Any], bool] | None = None,
  max_validation_retries: int | None = None,
- priority: int | None = 0,
+ priority: int | None = None,
  ) -> Models.ToolOutput:
  """
  Generate a single question from the given text.

  Arguments:
- text: The input text to generate a question from
+ text: The input text
  number_of_questions: Number of questions to generate
  with_analysis: Whether to include detailed reasoning analysis
- output_lang: Language for the output question
- user_prompt: Additional instructions for question generation
- temperature: Controls randomness
+ output_lang: Language for the output
+ user_prompt: Additional instructions
+ temperature: Controls randomness (0.0 - 2.0)
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
@@ -418,7 +414,6 @@ class TheTool:
  ToolOutput
  """
  tool_name = sys._getframe().f_code.co_name
- prompt_file = tool_name + ".yaml"
  start = perf_counter()

  try:
@@ -436,7 +431,7 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file=prompt_file,
+ tool_name=tool_name,
  output_model=Models.ReasonListStr,
  mode=None,
  )
@@ -469,9 +464,9 @@ class TheTool:
  logprobs: bool = False,
  top_logprobs: int = 3,
  mode: Literal["default", "reason"] = "default",
- validator: Callable[[object], bool] | None = None,
+ validator: Callable[[Any], bool] | None = None,
  max_validation_retries: int | None = None,
- priority: int | None = 0,
+ priority: int | None = None,
  ) -> Models.ToolOutput:
  """
  Merge multiple questions into a single unified question.
@@ -479,12 +474,11 @@ class TheTool:
  Arguments:
  text: List of questions to merge
  with_analysis: Whether to include detailed reasoning analysis
- output_lang: Language for the output merged question
- user_prompt: Additional instructions for question merging
- temperature: Controls randomness
+ output_lang: Language for the output
+ user_prompt: Additional instructions
+ temperature: Controls randomness (0.0 - 2.0)
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
- mode: Merging strategy - 'default' for direct merge, 'reason' for reasoned merge
  validator: Custom validation function to validate the output
  max_validation_retries: Maximum number of retry attempts if validation fails
  priority: Task execution priority (if enabled by vLLM and the model)
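Only the docstring entry for `mode` is dropped in this hunk; the parameter itself (`Literal["default", "reason"]`) survives in the signature above, so both merge strategies remain callable (again reusing the `tool` instance from the earlier sketch):

```python
out = tool.merge_questions(
    text=["What is attention?", "How does attention scale with sequence length?"],
    mode="reason",  # still accepted; only its Arguments entry was removed
)
```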
@@ -493,7 +487,6 @@ class TheTool:
  ToolOutput
  """
  tool_name = sys._getframe().f_code.co_name
- prompt_file = tool_name + ".yaml"
  start = perf_counter()

  try:
@@ -511,7 +504,7 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file=prompt_file,
+ tool_name=tool_name,
  output_model=Models.Str,
  mode=mode,
  )
@@ -544,22 +537,21 @@ class TheTool:
  logprobs: bool = False,
  top_logprobs: int = 3,
  mode: Literal["positive", "negative", "hard_negative"] = "positive",
- validator: Callable[[object], bool] | None = None,
+ validator: Callable[[Any], bool] | None = None,
  max_validation_retries: int | None = None,
- priority: int | None = 0,
+ priority: int | None = None,
  ) -> Models.ToolOutput:
  """
  Rewrite a text with different modes.

  Arguments:
- text: The input text to rewrite
+ text: The input text
  with_analysis: Whether to include detailed reasoning analysis
- output_lang: Language for the output rewritten text
- user_prompt: Additional instructions for rewriting
- temperature: Controls randomness
+ output_lang: Language for the output
+ user_prompt: Additional instructions
+ temperature: Controls randomness (0.0 - 2.0)
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
- mode: Rewriting mode - 'positive', 'negative', or 'hard_negative'
  validator: Custom validation function to validate the output
  max_validation_retries: Maximum number of retry attempts if validation fails
  priority: Task execution priority (if enabled by vLLM and the model)
@@ -568,7 +560,6 @@ class TheTool:
  ToolOutput
  """
  tool_name = sys._getframe().f_code.co_name
- prompt_file = tool_name + ".yaml"
  start = perf_counter()

  try:
@@ -585,7 +576,7 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file=prompt_file,
+ tool_name=tool_name,
  output_model=Models.Str,
  mode=mode,
  )
@@ -618,9 +609,9 @@ class TheTool:
  temperature: float | None = 0.0,
  logprobs: bool = False,
  top_logprobs: int = 3,
- validator: Callable[[object], bool] | None = None,
+ validator: Callable[[Any], bool] | None = None,
  max_validation_retries: int | None = None,
- priority: int | None = 0,
+ priority: int | None = None,
  ) -> Models.ToolOutput:
  """
  Generate a list of questions about a subject.
@@ -629,9 +620,9 @@ class TheTool:
  text: The subject text to generate questions about
  number_of_questions: Number of questions to generate
  with_analysis: Whether to include detailed reasoning analysis
- output_lang: Language for the output questions
- user_prompt: Additional instructions for question generation
- temperature: Controls randomness
+ output_lang: Language for the output
+ user_prompt: Additional instructions
+ temperature: Controls randomness (0.0 - 2.0)
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
@@ -642,7 +633,6 @@ class TheTool:
  ToolOutput
  """
  tool_name = sys._getframe().f_code.co_name
- prompt_file = tool_name + ".yaml"
  start = perf_counter()

  try:
@@ -660,7 +650,7 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file=prompt_file,
+ tool_name=tool_name,
  output_model=Models.ReasonListStr,
  mode=None,
  )
@@ -692,19 +682,19 @@ class TheTool:
  temperature: float | None = 0.0,
  logprobs: bool = False,
  top_logprobs: int = 3,
- validator: Callable[[object], bool] | None = None,
+ validator: Callable[[Any], bool] | None = None,
  max_validation_retries: int | None = None,
- priority: int | None = 0,
+ priority: int | None = None,
  ) -> Models.ToolOutput:
  """
  Summarize the given subject text.

  Arguments:
- text: The input text to summarize
+ text: The input text
  with_analysis: Whether to include detailed reasoning analysis
- output_lang: Language for the output summary
- user_prompt: Additional instructions for summarization
- temperature: Controls randomness
+ output_lang: Language for the output
+ user_prompt: Additional instructions
+ temperature: Controls randomness (0.0 - 2.0)
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
@@ -715,7 +705,6 @@ class TheTool:
  ToolOutput
  """
  tool_name = sys._getframe().f_code.co_name
- prompt_file = tool_name + ".yaml"
  start = perf_counter()

  try:
@@ -732,7 +721,7 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file=prompt_file,
+ tool_name=tool_name,
  output_model=Models.Str,
  mode=None,
  )
@@ -765,9 +754,9 @@ class TheTool:
  temperature: float | None = 0.0,
  logprobs: bool = False,
  top_logprobs: int = 3,
- validator: Callable[[object], bool] | None = None,
+ validator: Callable[[Any], bool] | None = None,
  max_validation_retries: int | None = None,
- priority: int | None = 0,
+ priority: int | None = None,
  ) -> Models.ToolOutput:
  """
  Translate text between languages.
@@ -775,12 +764,12 @@ class TheTool:
  Important Note: This tool is EXPERIMENTAL, you can use it but it isn't reliable.

  Arguments:
- text: The input text to translate
+ text: The input text
  target_language: The target language for translation
  use_chunker: Whether to use text chunker for text length bigger than 1500
  with_analysis: Whether to include detailed reasoning analysis
- user_prompt: Additional instructions for translation
- temperature: Controls randomness
+ user_prompt: Additional instructions
+ temperature: Controls randomness (0.0 - 2.0)
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
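The docstring keeps the `use_chunker` escape hatch for long inputs. A sketch of a chunked translation call; the chunking presumably goes through `texttools/internals/text_to_chunks.py` from the RECORD, and the accepted `target_language` format is not shown:

```python
long_document = "..."  # imagine a document of more than 1500 characters here

out = tool.translate(
    text=long_document,
    target_language="German",  # illustrative value; accepted format is an assumption
    use_chunker=True,          # per the docstring: for text length bigger than 1500
)
```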
@@ -791,7 +780,6 @@ class TheTool:
  ToolOutput
  """
  tool_name = sys._getframe().f_code.co_name
- prompt_file = tool_name + ".yaml"
  start = perf_counter()

  try:
@@ -815,7 +803,7 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file=prompt_file,
+ tool_name=tool_name,
  output_model=Models.Str,
  mode=None,
  output_lang=None,
@@ -852,7 +840,7 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file=prompt_file,
+ tool_name=tool_name,
  output_model=Models.Str,
  mode=None,
  output_lang=None,
@@ -885,9 +873,9 @@ class TheTool:
  temperature: float | None = 0.0,
  logprobs: bool = False,
  top_logprobs: int = 3,
- validator: Callable[[object], bool] | None = None,
+ validator: Callable[[Any], bool] | None = None,
  max_validation_retries: int | None = None,
- priority: int | None = 0,
+ priority: int | None = None,
  ) -> Models.ToolOutput:
  """
  Proposition input text to meaningful sentences.
@@ -897,9 +885,9 @@ class TheTool:
  Arguments:
  text: The input text
  with_analysis: Whether to include detailed reasoning analysis
- output_lang: Language for the output summary
- user_prompt: Additional instructions for summarization
- temperature: Controls randomness
+ output_lang: Language for the output
+ user_prompt: Additional instructions
+ temperature: Controls randomness (0.0 - 2.0)
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
@@ -910,7 +898,6 @@ class TheTool:
  ToolOutput
  """
  tool_name = sys._getframe().f_code.co_name
- prompt_file = tool_name + ".yaml"
  start = perf_counter()

  try:
@@ -927,7 +914,7 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file=prompt_file,
+ tool_name=tool_name,
  output_model=Models.ListStr,
  mode=None,
  )
@@ -960,9 +947,9 @@ class TheTool:
  temperature: float | None = 0.0,
  logprobs: bool = False,
  top_logprobs: int = 3,
- validator: Callable[[object], bool] | None = None,
+ validator: Callable[[Any], bool] | None = None,
  max_validation_retries: int | None = None,
- priority: int | None = 0,
+ priority: int | None = None,
  ) -> Models.ToolOutput:
  """
  Checks wheather a statement is relevant to the source text or not.
@@ -973,9 +960,9 @@ class TheTool:
  text: The input text
  source_text: the source text that we want to check relation of text to it
  with_analysis: Whether to include detailed reasoning analysis
- output_lang: Language for the output summary
- user_prompt: Additional instructions for summarization
- temperature: Controls randomness
+ output_lang: Language for the output
+ user_prompt: Additional instructions
+ temperature: Controls randomness (0.0 - 2.0)
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
@@ -986,7 +973,6 @@ class TheTool:
  ToolOutput
  """
  tool_name = sys._getframe().f_code.co_name
- prompt_file = tool_name + ".yaml"
  start = perf_counter()

  try:
@@ -1003,7 +989,7 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file=prompt_file,
+ tool_name=tool_name,
  output_model=Models.Bool,
  mode=None,
  source_text=source_text,
@@ -1030,29 +1016,27 @@ class TheTool:
  def run_custom(
  self,
  prompt: str,
- output_model: object,
+ output_model: Any,
  with_analysis: bool = False,
  analyze_template: str | None = None,
  output_lang: str | None = None,
  temperature: float | None = None,
  logprobs: bool | None = None,
  top_logprobs: int = 3,
- validator: Callable[[object], bool] | None = None,
+ validator: Callable[[Any], bool] | None = None,
  max_validation_retries: int | None = None,
- priority: int | None = 0,
+ priority: int | None = None,
  ) -> Models.ToolOutput:
  """
  Custom tool that can do almost anything!

- Important Note: This tool is EXPERIMENTAL, you can use it but it isn't reliable.
-
  Arguments:
  prompt: The user prompt
  output_model: Pydantic BaseModel used for structured output
  with_analysis: Whether to include detailed reasoning analysis
  analyze_template: The analyze template used for reasoning analysis
- output_lang: Language for the output summary
- temperature: Controls randomness
+ output_lang: Language for the output
+ temperature: Controls randomness (0.0 - 2.0)
  logprobs: Whether to return token probability information
  top_logprobs: Number of top token alternatives to return if logprobs enabled
  validator: Custom validation function to validate the output
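`run_custom` loosens `output_model` from `object` to `Any` while the docstring still asks for a Pydantic `BaseModel`, and its EXPERIMENTAL warning is removed. A hedged end-to-end sketch; the `Sentiment` model is invented, and it is an assumption that the validator receives the parsed model instance:

```python
from pydantic import BaseModel

class Sentiment(BaseModel):
    label: str
    confidence: float

out = tool.run_custom(
    prompt="Classify the sentiment of: 'I loved this library.'",
    output_model=Sentiment,                          # structured-output target
    validator=lambda s: 0.0 <= s.confidence <= 1.0,  # assumes a parsed Sentiment instance
)
```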
@@ -1063,7 +1047,6 @@ class TheTool:
  ToolOutput
  """
  tool_name = sys._getframe().f_code.co_name
- prompt_file = tool_name + ".yaml"
  start = perf_counter()

  try:
@@ -1082,7 +1065,7 @@ class TheTool:
  max_validation_retries=max_validation_retries,
  priority=priority,
  # Internal parameters
- prompt_file=prompt_file,
+ tool_name=tool_name,
  user_prompt=None,
  mode=None,
  )
@@ -1,32 +0,0 @@
- hamtaa_texttools-1.1.21.dist-info/licenses/LICENSE,sha256=Hb2YOBKy2MJQLnyLrX37B4ZVuac8eaIcE71SvVIMOLg,1082
- texttools/__init__.py,sha256=CmCS9dEvO6061GiJ8A7gD3UAhCWHTkaID9q3Krlyq_o,311
- texttools/batch/batch_config.py,sha256=scWYQBDuaTj8-b2x_a33Zu-zxm7eqEf5FFoquD-Sv94,1029
- texttools/batch/batch_manager.py,sha256=6HfsexU0PHGGBH7HKReZ-CQxaQI9DXYKAPsFXxovb_I,8740
- texttools/batch/batch_runner.py,sha256=fmoq7yxtEdvfLbEhcx95ma-lgrL-ZdI2EgxmEfVcKtE,10016
- texttools/internals/async_operator.py,sha256=sKMYEy7jEcsXpwnBkA18PFubkM-TXZrBH3QwF7l-wSg,7054
- texttools/internals/exceptions.py,sha256=h_yp_5i_5IfmqTBQ4S6ZOISrrliJBQ3HTEAjwJXrplk,495
- texttools/internals/models.py,sha256=9uoCAe2TLrSzyS9lMJja5orPAYaCvVL1zoCb6FNdkfs,4541
- texttools/internals/operator_utils.py,sha256=eLY2OjYQ3jT-50nx3I8gzuVzgGpMi52f5oB3cnFyxko,1864
- texttools/internals/prompt_loader.py,sha256=yYXDD4YYG2zohGPAmvZwmv5f6xV_RSl5yOrObTh9w7I,3352
- texttools/internals/sync_operator.py,sha256=IG3CXfGmv4PdFlAQ4AZcKuBAqPJdkIAK4mVw77zLbqI,6959
- texttools/internals/text_to_chunks.py,sha256=vY3odhgCZK4E44k_SGlLoSiKkdN0ib6-lQAsPcplAHA,3843
- texttools/prompts/README.md,sha256=ztajRJcmFLhyrUF0_qmOXaCwGsTGCFabfMjch2LAJG0,1375
- texttools/prompts/categorize.yaml,sha256=016b1uGtbKXEwB8_2_bBgVuUelBlu_rgT85XK_c3Yv0,1219
- texttools/prompts/check_fact.yaml,sha256=gQqacCXqUEx3u2FRwhFSZHvhyWGwsYuJd1nIJyhpu7Q,700
- texttools/prompts/extract_entities.yaml,sha256=DN8lZjvzCjotODnHFkWIAxFvmVvoeSs-hDKdN1L6bec,608
- texttools/prompts/extract_keywords.yaml,sha256=GoeApi9SUCLZgs18H2-2BxZiKQ3lHptMPesgq3cluqU,3171
- texttools/prompts/is_question.yaml,sha256=w5qF-z05h62YVs-0x2b2ySlHDKIhukFC9pibnvNM0vc,469
- texttools/prompts/merge_questions.yaml,sha256=f6bHEx54jJ8hnb8iDBUCxXeGdGwRFmuu7vOkVWdaIkM,1788
- texttools/prompts/propositionize.yaml,sha256=agZKQY-NmeJD86DGjmd-paIuazf82bczIGadgzSP5Vs,1378
- texttools/prompts/rewrite.yaml,sha256=h6x8aXcW8oRxEbp466eak0y-LCkUOKf-mJ-vNVp5j5M,5386
- texttools/prompts/run_custom.yaml,sha256=IETY9H0wPGWIIzcnupfbwwKQblwZrbYAxB754W9MhgU,125
- texttools/prompts/subject_to_question.yaml,sha256=TfVmZ6gDgaHRqJWCVkFlKpuJczpMvJTo4XLWPaq5zic,1145
- texttools/prompts/summarize.yaml,sha256=CKx4vjhHbGus1TdjDz_oc0bNEQtq7zfHsZkV2WeYHDU,457
- texttools/prompts/text_to_question.yaml,sha256=mnArBoYu7gpGHriaU2-Aw5SixB2ZIgoHMt99PnTPKD0,1003
- texttools/prompts/translate.yaml,sha256=ew9RERAVSzg0cvxAinNwTSFIaOIjdwIsekbUsgAuNgo,632
- texttools/tools/async_tools.py,sha256=VU3cqqCPILsyjRiG84w8kCw3iDSuFbI6S3VjExXZwFQ,44635
- texttools/tools/sync_tools.py,sha256=2cqcosMYR6LHuYw32WFR-drvqQ-t7Q9_2rUBDOeYzho,44441
- hamtaa_texttools-1.1.21.dist-info/METADATA,sha256=lExdE6uMFSs_wqUSElOyktjpHpZx4RY-cUH6azF-IYA,10183
- hamtaa_texttools-1.1.21.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- hamtaa_texttools-1.1.21.dist-info/top_level.txt,sha256=5Mh0jIxxZ5rOXHGJ6Mp-JPKviywwN0MYuH0xk5bEWqE,10
- hamtaa_texttools-1.1.21.dist-info/RECORD,,