hamtaa-texttools 1.1.13__py3-none-any.whl → 1.1.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,9 +1,11 @@
-from typing import Literal, Any, Callable
+from datetime import datetime
+from typing import Literal, Any
+from collections.abc import Callable
 
 from openai import AsyncOpenAI
 
 from texttools.tools.internals.async_operator import AsyncOperator
-import texttools.tools.internals.output_models as OM
+import texttools.tools.internals.models as Models
 
 
 class AsyncTheTool:
@@ -29,19 +31,23 @@ class AsyncTheTool:
     async def categorize(
         self,
         text: str,
+        categories: list[str] | Models.CategoryTree,
         with_analysis: bool = False,
         user_prompt: str | None = None,
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
+        mode: Literal["category_list", "category_tree"] = "category_list",
         validator: Callable[[Any], bool] | None = None,
         max_validation_retries: int | None = None,
-    ) -> OM.ToolOutput:
+        priority: int | None = 0,
+    ) -> Models.ToolOutput:
         """
-        Categorize a text into a single Islamic studies domain category.
+        Categorize a text into a category / category tree.
 
         Arguments:
             text: The input text to categorize
+            categories: The category / category_tree to give to LLM
             with_analysis: Whether to include detailed reasoning analysis
             user_prompt: Additional instructions for the categorization
             temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
@@ -49,30 +55,118 @@ class AsyncTheTool:
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
+            priority: Task execution priority (if enabled by vLLM and model)
 
         Returns:
             ToolOutput: Object containing:
-                - result (str): The assigned Islamic studies category
+                - result (str): The assigned category
                 - logprobs (list | None): Probability data if logprobs enabled
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
                 - errors (list(str) | None): Errors occured during tool call
         """
-        return await self._operator.run(
-            # User parameters
-            text=text,
-            with_analysis=with_analysis,
-            user_prompt=user_prompt,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            validator=validator,
-            max_validation_retries=max_validation_retries,
-            # Internal parameters
-            prompt_file="categorizer.yaml",
-            output_model=OM.CategorizerOutput,
-            mode=None,
-            output_lang=None,
-        )
+        start = datetime.now()
+
+        if mode == "category_tree":
+            # Initializations
+            output = Models.ToolOutput()
+            levels = categories.get_level_count()
+            parent_id = 0
+            final_output = []
+
+            for _ in range(levels):
+                # Get child nodes for current parent
+                parent_node = categories.find_node(parent_id)
+                children = categories.find_children(parent_node)
+
+                # Check if child nodes exist
+                if not children:
+                    output.errors.append(
+                        f"No categories found for parent_id {parent_id} in the tree"
+                    )
+                    end = datetime.now()
+                    output.execution_time = (end - start).total_seconds()
+                    return output
+
+                # Extract category names and descriptions
+                category_list = [
+                    f"Category Name: {node.name}, Description: {node.description}"
+                    for node in children
+                ]
+                category_names = [node.name for node in children]
+
+                # Run categorization for this level
+                level_output = await self._operator.run(
+                    # User parameters
+                    text=text,
+                    category_list=category_list,
+                    with_analysis=with_analysis,
+                    user_prompt=user_prompt,
+                    temperature=temperature,
+                    logprobs=logprobs,
+                    top_logprobs=top_logprobs,
+                    mode=mode,
+                    validator=validator,
+                    max_validation_retries=max_validation_retries,
+                    # Internal parameters
+                    prompt_file="categorize.yaml",
+                    output_model=Models.create_dynamic_model(category_names),
+                    output_lang=None,
+                )
+
+                # Check for errors from operator
+                if level_output.errors:
+                    output.errors.extend(level_output.errors)
+                    end = datetime.now()
+                    output.execution_time = (end - start).total_seconds()
+                    return output
+
+                # Get the chosen category
+                chosen_category = level_output.result
+
+                # Find the corresponding node
+                parent_node = categories.find_node(chosen_category)
+                if parent_node is None:
+                    output.errors.append(
+                        f"Category '{chosen_category}' not found in tree after selection"
+                    )
+                    end = datetime.now()
+                    output.execution_time = (end - start).total_seconds()
+                    return output
+
+                parent_id = parent_node.node_id
+                final_output.append(parent_node.name)
+
+            # Copy analysis/logprobs/process from the last level's output
+            output.analysis = level_output.analysis
+            output.logprobs = level_output.logprobs
+            output.process = level_output.process
+
+            output.result = final_output
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
+
+        else:
+            output = await self._operator.run(
+                # User parameters
+                text=text,
+                category_list=categories,
+                with_analysis=with_analysis,
+                user_prompt=user_prompt,
+                temperature=temperature,
+                logprobs=logprobs,
+                top_logprobs=top_logprobs,
+                mode=mode,
+                validator=validator,
+                max_validation_retries=max_validation_retries,
+                # Internal parameters
+                prompt_file="categorize.yaml",
+                output_model=Models.create_dynamic_model(categories),
+                output_lang=None,
+            )
+            end = datetime.now()
+            output.execution_time = (end - start).total_seconds()
+            return output
 
     async def extract_keywords(
         self,
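
For orientation, a minimal usage sketch of the reworked categorize API. How a Models.CategoryTree is constructed is not shown in this diff, so the tree-mode call is only indicated; the flat-list call relies solely on the signature above, and the AsyncTheTool constructor arguments are assumed.

    import asyncio

    async def main():
        tool = AsyncTheTool(...)  # client/model wiring not shown in this diff

        # Flat list mode (the default): result is a single category name.
        flat = await tool.categorize(
            text="How do neural networks learn?",
            categories=["science", "history", "sports"],
            mode="category_list",
        )
        print(flat.result)  # e.g. "science"

        # Tree mode walks the tree one level per LLM call and returns the
        # chosen root-to-leaf path, e.g. ["science", "machine learning"]:
        # await tool.categorize(text=..., categories=my_tree, mode="category_tree")

    asyncio.run(main())
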
@@ -83,9 +177,12 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
+        mode: Literal["auto", "threshold", "count"] = "auto",
+        number_of_keywords: int | None = None,
         validator: Callable[[Any], bool] | None = None,
         max_validation_retries: int | None = None,
-    ) -> OM.ToolOutput:
+        priority: int | None = 0,
+    ) -> Models.ToolOutput:
         """
         Extract salient keywords from text.
 
@@ -99,6 +196,7 @@ class AsyncTheTool:
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
+            priority: Task execution priority (if enabled by vLLM and model)
 
         Returns:
             ToolOutput: Object containing:
@@ -107,7 +205,8 @@ class AsyncTheTool:
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
                 - errors (list(str) | None): Errors occured during tool call
         """
-        return await self._operator.run(
+        start = datetime.now()
+        output = await self._operator.run(
             # User parameters
             text=text,
             with_analysis=with_analysis,
@@ -116,13 +215,18 @@ class AsyncTheTool:
             temperature=temperature,
             logprobs=logprobs,
             top_logprobs=top_logprobs,
+            mode=mode,
+            number_of_keywords=number_of_keywords,
             validator=validator,
             max_validation_retries=max_validation_retries,
+            priority=priority,
             # Internal parameters
             prompt_file="extract_keywords.yaml",
-            output_model=OM.ListStrOutput,
-            mode=None,
+            output_model=Models.ListStrOutput,
         )
+        end = datetime.now()
+        output.execution_time = (end - start).total_seconds()
+        return output
 
     async def extract_entities(
         self,
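
The new mode and number_of_keywords knobs suggest usage like the sketch below; the semantics ("count" capping the list at number_of_keywords) are inferred from the parameter names, not confirmed by the diff.

    # Hedged sketch: explicit keyword budget via "count" mode.
    output = await tool.extract_keywords(
        text="Transformers use self-attention to model long-range dependencies.",
        mode="count",
        number_of_keywords=5,
    )
    if not output.errors:
        print(output.result)          # e.g. ["transformers", "self-attention", ...]
        print(output.execution_time)  # wall-clock seconds, new in this release
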
@@ -135,7 +239,8 @@ class AsyncTheTool:
         top_logprobs: int | None = None,
         validator: Callable[[Any], bool] | None = None,
         max_validation_retries: int | None = None,
-    ) -> OM.ToolOutput:
+        priority: int | None = 0,
+    ) -> Models.ToolOutput:
         """
         Perform Named Entity Recognition (NER) over the input text.
 
@@ -149,6 +254,7 @@ class AsyncTheTool:
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
+            priority: Task execution priority (if enabled by vLLM and model)
 
         Returns:
             ToolOutput: Object containing:
@@ -157,7 +263,8 @@ class AsyncTheTool:
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
                 - errors (list(str) | None): Errors occured during tool call
         """
-        return await self._operator.run(
+        start = datetime.now()
+        output = await self._operator.run(
             # User parameters
             text=text,
             with_analysis=with_analysis,
@@ -168,11 +275,15 @@ class AsyncTheTool:
             top_logprobs=top_logprobs,
             validator=validator,
             max_validation_retries=max_validation_retries,
+            priority=priority,
             # Internal parameters
             prompt_file="extract_entities.yaml",
-            output_model=OM.ListDictStrStrOutput,
+            output_model=Models.ListDictStrStrOutput,
             mode=None,
         )
+        end = datetime.now()
+        output.execution_time = (end - start).total_seconds()
+        return output
 
     async def is_question(
         self,
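
Every method in this release repeats the same wall-clock timing pattern: start = datetime.now() before the operator call, then execution_time = (end - start).total_seconds() on the output. A decorator would express the pattern once; a sketch of that refactor (not part of the package):

    from datetime import datetime
    from functools import wraps

    def timed(func):
        """Attach wall-clock duration to the returned ToolOutput."""
        @wraps(func)
        async def wrapper(*args, **kwargs):
            start = datetime.now()
            output = await func(*args, **kwargs)
            output.execution_time = (datetime.now() - start).total_seconds()
            return output
        return wrapper

One caveat: categorize's tree mode also sets execution_time on its early-error returns; since every return passes through the wrapper, a decorator would cover those paths uniformly as well.
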
@@ -184,7 +295,8 @@ class AsyncTheTool:
         top_logprobs: int | None = None,
         validator: Callable[[Any], bool] | None = None,
         max_validation_retries: int | None = None,
-    ) -> OM.ToolOutput:
+        priority: int | None = 0,
+    ) -> Models.ToolOutput:
         """
         Detect if the input is phrased as a question.
 
@@ -197,6 +309,7 @@ class AsyncTheTool:
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
+            priority: Task execution priority (if enabled by vLLM and model)
 
         Returns:
             ToolOutput: Object containing:
@@ -205,7 +318,8 @@ class AsyncTheTool:
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
                 - errors (list(str) | None): Errors occured during tool call
         """
-        return await self._operator.run(
+        start = datetime.now()
+        output = await self._operator.run(
             # User parameters
             text=text,
             with_analysis=with_analysis,
@@ -215,12 +329,16 @@ class AsyncTheTool:
             top_logprobs=top_logprobs,
             validator=validator,
             max_validation_retries=max_validation_retries,
+            priority=priority,
             # Internal parameters
             prompt_file="is_question.yaml",
-            output_model=OM.BoolOutput,
+            output_model=Models.BoolOutput,
             mode=None,
             output_lang=None,
         )
+        end = datetime.now()
+        output.execution_time = (end - start).total_seconds()
+        return output
 
     async def text_to_question(
         self,
@@ -233,7 +351,8 @@ class AsyncTheTool:
         top_logprobs: int | None = None,
         validator: Callable[[Any], bool] | None = None,
         max_validation_retries: int | None = None,
-    ) -> OM.ToolOutput:
+        priority: int | None = 0,
+    ) -> Models.ToolOutput:
         """
         Generate a single question from the given text.
 
@@ -247,6 +366,7 @@ class AsyncTheTool:
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
+            priority: Task execution priority (if enabled by vLLM and model)
 
         Returns:
             ToolOutput: Object containing:
@@ -255,7 +375,8 @@ class AsyncTheTool:
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
                 - errors (list(str) | None): Errors occured during tool call
         """
-        return await self._operator.run(
+        start = datetime.now()
+        output = await self._operator.run(
             # User parameters
             text=text,
             with_analysis=with_analysis,
@@ -266,11 +387,15 @@ class AsyncTheTool:
             top_logprobs=top_logprobs,
             validator=validator,
             max_validation_retries=max_validation_retries,
+            priority=priority,
             # Internal parameters
             prompt_file="text_to_question.yaml",
-            output_model=OM.StrOutput,
+            output_model=Models.StrOutput,
             mode=None,
         )
+        end = datetime.now()
+        output.execution_time = (end - start).total_seconds()
+        return output
 
     async def merge_questions(
         self,
@@ -284,7 +409,8 @@ class AsyncTheTool:
         mode: Literal["default", "reason"] = "default",
         validator: Callable[[Any], bool] | None = None,
         max_validation_retries: int | None = None,
-    ) -> OM.ToolOutput:
+        priority: int | None = 0,
+    ) -> Models.ToolOutput:
         """
         Merge multiple questions into a single unified question.
 
@@ -299,6 +425,7 @@ class AsyncTheTool:
             mode: Merging strategy - 'default' for direct merge, 'reason' for reasoned merge
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
+            priority: Task execution priority (if enabled by vLLM and model)
 
         Returns:
             ToolOutput: Object containing:
@@ -307,10 +434,11 @@ class AsyncTheTool:
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
                 - errors (list(str) | None): Errors occured during tool call
         """
-        text = ", ".join(text)
-        return await self._operator.run(
+        start = datetime.now()
+        text_combined = ", ".join(text)
+        output = await self._operator.run(
             # User parameters
-            text=text,
+            text=text_combined,
             with_analysis=with_analysis,
             output_lang=output_lang,
             user_prompt=user_prompt,
@@ -319,11 +447,15 @@ class AsyncTheTool:
             top_logprobs=top_logprobs,
             validator=validator,
             max_validation_retries=max_validation_retries,
+            priority=priority,
             # Internal parameters
             prompt_file="merge_questions.yaml",
-            output_model=OM.StrOutput,
+            output_model=Models.StrOutput,
             mode=mode,
         )
+        end = datetime.now()
+        output.execution_time = (end - start).total_seconds()
+        return output
 
     async def rewrite(
         self,
@@ -337,7 +469,8 @@ class AsyncTheTool:
         mode: Literal["positive", "negative", "hard_negative"] = "positive",
         validator: Callable[[Any], bool] | None = None,
         max_validation_retries: int | None = None,
-    ) -> OM.ToolOutput:
+        priority: int | None = 0,
+    ) -> Models.ToolOutput:
         """
         Rewrite a text with different modes.
 
@@ -352,6 +485,7 @@ class AsyncTheTool:
             mode: Rewriting mode - 'positive', 'negative', or 'hard_negative'
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
+            priority: Task execution priority (if enabled by vLLM and model)
 
         Returns:
             ToolOutput: Object containing:
@@ -360,7 +494,8 @@ class AsyncTheTool:
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
                 - errors (list(str) | None): Errors occured during tool call
         """
-        return await self._operator.run(
+        start = datetime.now()
+        output = await self._operator.run(
             # User parameters
             text=text,
             with_analysis=with_analysis,
@@ -371,11 +506,15 @@ class AsyncTheTool:
             top_logprobs=top_logprobs,
             validator=validator,
             max_validation_retries=max_validation_retries,
+            priority=priority,
             # Internal parameters
             prompt_file="rewrite.yaml",
-            output_model=OM.StrOutput,
+            output_model=Models.StrOutput,
             mode=mode,
         )
+        end = datetime.now()
+        output.execution_time = (end - start).total_seconds()
+        return output
 
     async def subject_to_question(
         self,
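
The three rewrite modes read like retrieval training-data generators: "positive" for meaning-preserving paraphrases, "negative"/"hard_negative" for contrastive variants. That interpretation is assumed from the mode names; a sketch:

    anchor = "What is the capital of France?"

    # Meaning-preserving paraphrase of the anchor.
    paraphrase = await tool.rewrite(text=anchor, mode="positive")

    # Topically close but semantically different; a plausible hard negative
    # for embedding or reranker training.
    distractor = await tool.rewrite(text=anchor, mode="hard_negative")
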
@@ -389,7 +528,8 @@ class AsyncTheTool:
         top_logprobs: int | None = None,
         validator: Callable[[Any], bool] | None = None,
         max_validation_retries: int | None = None,
-    ) -> OM.ToolOutput:
+        priority: int | None = 0,
+    ) -> Models.ToolOutput:
         """
         Generate a list of questions about a subject.
 
@@ -404,6 +544,7 @@ class AsyncTheTool:
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
+            priority: Task execution priority (if enabled by vLLM and model)
 
         Returns:
             ToolOutput: Object containing:
@@ -412,7 +553,8 @@ class AsyncTheTool:
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
                 - errors (list(str) | None): Errors occured during tool call
         """
-        return await self._operator.run(
+        start = datetime.now()
+        output = await self._operator.run(
             # User parameters
             text=text,
             number_of_questions=number_of_questions,
@@ -424,11 +566,15 @@ class AsyncTheTool:
             top_logprobs=top_logprobs,
             validator=validator,
             max_validation_retries=max_validation_retries,
+            priority=priority,
             # Internal parameters
             prompt_file="subject_to_question.yaml",
-            output_model=OM.ReasonListStrOutput,
+            output_model=Models.ReasonListStrOutput,
             mode=None,
         )
+        end = datetime.now()
+        output.execution_time = (end - start).total_seconds()
+        return output
 
     async def summarize(
         self,
@@ -441,7 +587,8 @@ class AsyncTheTool:
         top_logprobs: int | None = None,
         validator: Callable[[Any], bool] | None = None,
         max_validation_retries: int | None = None,
-    ) -> OM.ToolOutput:
+        priority: int | None = 0,
+    ) -> Models.ToolOutput:
         """
         Summarize the given subject text.
 
@@ -455,6 +602,7 @@ class AsyncTheTool:
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
+            priority: Task execution priority (if enabled by vLLM and model)
 
         Returns:
             ToolOutput: Object containing:
@@ -463,7 +611,8 @@ class AsyncTheTool:
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
                 - errors (list(str) | None): Errors occured during tool call
         """
-        return await self._operator.run(
+        start = datetime.now()
+        output = await self._operator.run(
             # User parameters
             text=text,
             with_analysis=with_analysis,
@@ -474,11 +623,15 @@ class AsyncTheTool:
             top_logprobs=top_logprobs,
             validator=validator,
             max_validation_retries=max_validation_retries,
+            priority=priority,
             # Internal parameters
             prompt_file="summarize.yaml",
-            output_model=OM.StrOutput,
+            output_model=Models.StrOutput,
             mode=None,
         )
+        end = datetime.now()
+        output.execution_time = (end - start).total_seconds()
+        return output
 
     async def translate(
         self,
@@ -491,7 +644,8 @@ class AsyncTheTool:
         top_logprobs: int | None = None,
         validator: Callable[[Any], bool] | None = None,
         max_validation_retries: int | None = None,
-    ) -> OM.ToolOutput:
+        priority: int | None = 0,
+    ) -> Models.ToolOutput:
         """
         Translate text between languages.
 
@@ -505,6 +659,7 @@ class AsyncTheTool:
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
+            priority: Task execution priority (if enabled by vLLM and model)
 
         Returns:
             ToolOutput: Object containing:
@@ -513,7 +668,8 @@ class AsyncTheTool:
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
                 - errors (list(str) | None): Errors occured during tool call
         """
-        return await self._operator.run(
+        start = datetime.now()
+        output = await self._operator.run(
             # User parameters
             text=text,
             target_language=target_language,
@@ -524,12 +680,73 @@ class AsyncTheTool:
             top_logprobs=top_logprobs,
             validator=validator,
             max_validation_retries=max_validation_retries,
+            priority=priority,
             # Internal parameters
             prompt_file="translate.yaml",
-            output_model=OM.StrOutput,
+            output_model=Models.StrOutput,
             mode=None,
             output_lang=None,
         )
+        end = datetime.now()
+        output.execution_time = (end - start).total_seconds()
+        return output
+
+    async def detect_entity(
+        self,
+        text: str,
+        with_analysis: bool = False,
+        output_lang: str | None = None,
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
+        top_logprobs: int | None = None,
+        validator: Callable[[Any], bool] | None = None,
+        max_validation_retries: int | None = None,
+        priority: int | None = 0,
+    ) -> Models.ToolOutput:
+        """
+        Detects entities in a given text based on the entity_detector.yaml prompt.
+
+        Arguments:
+            text: The input text
+            with_analysis: Whether to include detailed reasoning analysis
+            output_lang: Language for the output summary
+            user_prompt: Additional instructions for summarization
+            temperature: Controls randomness (0.0 = deterministic, 1.0 = creative)
+            logprobs: Whether to return token probability information
+            top_logprobs: Number of top token alternatives to return if logprobs enabled
+            validator: Custom validation function to validate the output
+            max_validation_retries: Maximum number of retry attempts if validation fails
+            priority: Task execution priority (if enabled by vLLM and model)
+
+        Returns:
+            ToolOutput: Object containing:
+                - result (list[Entity]): The entities
+                - logprobs (list | None): Probability data if logprobs enabled
+                - analysis (str | None): Detailed reasoning if with_analysis enabled
+                - errors (list(str) | None): Errors occured during tool call
+        """
+        start = datetime.now()
+        output = await self._operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
+            validator=validator,
+            max_validation_retries=max_validation_retries,
+            priority=priority,
+            # Internal parameters
+            prompt_file="detect_entity.yaml",
+            output_model=Models.EntityDetectorOutput,
+            mode=None,
+        )
+        end = datetime.now()
+        output.execution_time = (end - start).total_seconds()
+        return output
 
     async def run_custom(
         self,
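
A minimal sketch of the new detect_entity call. The Entity model's fields are not shown in this diff, so the result handling is an assumption; note also that the docstring cites entity_detector.yaml while the code passes detect_entity.yaml.

    output = await tool.detect_entity(
        text="Avicenna wrote the Canon of Medicine in 1025.",
        output_lang="en",
    )
    if output.errors:
        print(output.errors)
    else:
        for entity in output.result:  # list of entities; field names assumed
            print(entity)
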
@@ -541,7 +758,8 @@ class AsyncTheTool:
         top_logprobs: int | None = None,
         validator: Callable[[Any], bool] | None = None,
         max_validation_retries: int | None = None,
-    ) -> OM.ToolOutput:
+        priority: int | None = 0,
+    ) -> Models.ToolOutput:
         """
         Custom tool that can do almost anything!
 
@@ -553,6 +771,7 @@ class AsyncTheTool:
             top_logprobs: Number of top token alternatives to return if logprobs enabled
             validator: Custom validation function to validate the output
             max_validation_retries: Maximum number of retry attempts if validation fails
+            priority: Task execution priority (if enabled by vLLM and model)
 
         Returns:
             ToolOutput: Object containing:
@@ -561,7 +780,8 @@ class AsyncTheTool:
                 - analysis (str | None): Detailed reasoning if with_analysis enabled
                 - errors (list(str) | None): Errors occured during tool call
         """
-        return await self._operator.run(
+        start = datetime.now()
+        output = await self._operator.run(
             # User paramaeters
             text=prompt,
             output_model=output_model,
@@ -572,9 +792,13 @@ class AsyncTheTool:
             top_logprobs=top_logprobs,
             validator=validator,
             max_validation_retries=max_validation_retries,
+            priority=priority,
             # Internal parameters
             prompt_file="run_custom.yaml",
             user_prompt=None,
             with_analysis=False,
             mode=None,
         )
+        end = datetime.now()
+        output.execution_time = (end - start).total_seconds()
+        return output
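
Finally, a sketch of run_custom with a caller-supplied schema and the new priority hint. The Pydantic base class for output_model is an assumption; the package's own output models are not shown in this diff.

    from pydantic import BaseModel

    class Sentiment(BaseModel):  # hypothetical caller-defined schema
        label: str
        confidence: float

    output = await tool.run_custom(
        prompt="Classify the sentiment of: 'I loved this book.'",
        output_model=Sentiment,
        priority=1,  # honored only if the vLLM backend and model support it
    )
    print(output.result, output.execution_time)
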