hamtaa-texttools 1.1.16__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. hamtaa_texttools-1.2.0.dist-info/METADATA +212 -0
  2. hamtaa_texttools-1.2.0.dist-info/RECORD +34 -0
  3. texttools/__init__.py +5 -5
  4. texttools/batch/__init__.py +0 -0
  5. texttools/batch/{batch_config.py → config.py} +16 -2
  6. texttools/batch/{internals/batch_manager.py → manager.py} +2 -2
  7. texttools/batch/{batch_runner.py → runner.py} +80 -69
  8. texttools/core/__init__.py +0 -0
  9. texttools/core/engine.py +254 -0
  10. texttools/core/exceptions.py +22 -0
  11. texttools/core/internal_models.py +58 -0
  12. texttools/core/operators/async_operator.py +194 -0
  13. texttools/core/operators/sync_operator.py +192 -0
  14. texttools/models.py +88 -0
  15. texttools/prompts/categorize.yaml +36 -77
  16. texttools/prompts/check_fact.yaml +24 -0
  17. texttools/prompts/extract_entities.yaml +7 -3
  18. texttools/prompts/extract_keywords.yaml +21 -9
  19. texttools/prompts/is_question.yaml +6 -2
  20. texttools/prompts/merge_questions.yaml +12 -5
  21. texttools/prompts/propositionize.yaml +24 -0
  22. texttools/prompts/rewrite.yaml +9 -10
  23. texttools/prompts/run_custom.yaml +2 -2
  24. texttools/prompts/subject_to_question.yaml +7 -3
  25. texttools/prompts/summarize.yaml +6 -2
  26. texttools/prompts/text_to_question.yaml +12 -6
  27. texttools/prompts/translate.yaml +7 -2
  28. texttools/py.typed +0 -0
  29. texttools/tools/__init__.py +0 -0
  30. texttools/tools/async_tools.py +778 -489
  31. texttools/tools/sync_tools.py +775 -487
  32. hamtaa_texttools-1.1.16.dist-info/METADATA +0 -255
  33. hamtaa_texttools-1.1.16.dist-info/RECORD +0 -31
  34. texttools/batch/internals/utils.py +0 -16
  35. texttools/prompts/README.md +0 -35
  36. texttools/prompts/detect_entity.yaml +0 -22
  37. texttools/tools/internals/async_operator.py +0 -200
  38. texttools/tools/internals/formatters.py +0 -24
  39. texttools/tools/internals/models.py +0 -183
  40. texttools/tools/internals/operator_utils.py +0 -54
  41. texttools/tools/internals/prompt_loader.py +0 -56
  42. texttools/tools/internals/sync_operator.py +0 -201
  43. {hamtaa_texttools-1.1.16.dist-info → hamtaa_texttools-1.2.0.dist-info}/WHEEL +0 -0
  44. {hamtaa_texttools-1.1.16.dist-info → hamtaa_texttools-1.2.0.dist-info}/licenses/LICENSE +0 -0
  45. {hamtaa_texttools-1.1.16.dist-info → hamtaa_texttools-1.2.0.dist-info}/top_level.txt +0 -0
@@ -1,255 +0,0 @@
- Metadata-Version: 2.4
- Name: hamtaa-texttools
- Version: 1.1.16
- Summary: A high-level NLP toolkit built on top of modern LLMs.
- Author-email: Tohidi <the.mohammad.tohidi@gmail.com>, Montazer <montazerh82@gmail.com>, Givechi <mohamad.m.givechi@gmail.com>, MoosaviNejad <erfanmoosavi84@gmail.com>
- License: MIT License
-
- Copyright (c) 2025 Hamtaa
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
- Keywords: nlp,llm,text-processing,openai
- Requires-Python: >=3.8
- Description-Content-Type: text/markdown
- License-File: LICENSE
- Requires-Dist: openai==1.97.1
- Requires-Dist: pydantic>=2.0.0
- Requires-Dist: pyyaml>=6.0
- Dynamic: license-file
-
- # TextTools
-
- ## 📌 Overview
-
- **TextTools** is a high-level **NLP toolkit** built on top of modern **LLMs**.
-
- It provides both **sync (`TheTool`)** and **async (`AsyncTheTool`)** APIs for maximum flexibility.
-
- It offers ready-to-use utilities for **translation, question detection, keyword extraction, categorization, NER extraction, and more**, designed to help you integrate AI-powered text processing into your applications with minimal effort.
-
- ---
-
- ## ✨ Features
-
- TextTools provides a rich collection of high-level NLP utilities.
- Each tool is designed to work with structured outputs (JSON / Pydantic).
-
- - **`categorize()`** - Classifies text into given categories (you supply a category tree)
- - **`extract_keywords()`** - Extracts keywords from text
- - **`extract_entities()`** - Named Entity Recognition (NER) system
- - **`is_question()`** - Binary detection of whether input is a question
- - **`text_to_question()`** - Generates questions from text
- - **`merge_questions()`** - Merges multiple questions with different modes
- - **`rewrite()`** - Rewrites text with different wording/meaning
- - **`subject_to_question()`** - Generates questions about a specific subject
- - **`summarize()`** - Text summarization
- - **`translate()`** - Text translation between languages
- - **`run_custom()`** - Allows users to define a custom tool with an arbitrary BaseModel
-
- ---
-
- ## ⚙️ `with_analysis`, `logprobs`, `output_lang`, `user_prompt`, `temperature`, `validator` and `priority` parameters
-
- TextTools provides several optional flags to customize LLM behavior:
-
- - **`with_analysis (bool)`** → Adds a reasoning step before generating the final output.
- **Note:** This doubles token usage per call because it triggers an additional LLM request.
-
- - **`logprobs (bool)`** → Returns token-level probabilities for the generated output. You can also specify `top_logprobs=<N>` to get the top N alternative tokens and their probabilities.
- **Note:** This feature only works if the model supports it.
-
- - **`output_lang (str)`** → Forces the model to respond in a specific language. The model will ignore other instructions about language and respond strictly in the requested language.
-
- - **`user_prompt (str)`** → Allows you to inject a custom instruction or prompt into the model alongside the main template. This gives you fine-grained control over how the model interprets or modifies the input text.
-
- - **`temperature (float)`** → Controls how creative the model's responses are. Takes a float from `0.0` to `2.0`.
-
- - **`validator (Callable)`** → Makes TheTool validate the result with your custom validator. The validator should return a bool (`True` if the output is acceptable, `False` if validation fails). If validation fails, TheTool retries with a modified `temperature`. You can specify `max_validation_retries=<N>` to change the number of retries.
-
- - **`priority (int)`** → Task execution priority level. Higher values mean higher priority and affect processing order in queues.
- **Note:** This feature only works if the model and vLLM support it.
-
- **Note:** Some tools do not support all of the parameters above.
-
- ---
-
- ## 🧩 ToolOutput
-
- Every tool in `TextTools` returns a `ToolOutput` object, a Pydantic BaseModel with the following attributes:
- - **`result: Any`** → The output of the LLM
- - **`analysis: str`** → The reasoning step performed before generating the final output
- - **`logprobs: list`** → Token-level probabilities for the generated output
- - **`process: str`** → The name of the tool that processed the input
- - **`processed_at: datetime`** → The time the input was processed
- - **`execution_time: float`** → The execution time (seconds)
- - **`errors: list[str]`** → Any errors that occurred while calling the LLM
-
- **Note:** You can use `repr(ToolOutput)` to see the details of your ToolOutput.
-
- ---
-
- ## 🚀 Installation
-
- Install the latest release via PyPI:
-
- ```bash
- pip install -U hamtaa-texttools
- ```
-
- ---
-
- ## 🧨 Sync vs Async
- | Tool | Style | Use case |
- |--------------|---------|---------------------------------------------|
- | `TheTool` | Sync | Simple scripts, sequential workflows |
- | `AsyncTheTool` | Async | High-throughput apps, APIs, concurrent tasks |
-
- ---
-
- ## ⚡ Quick Start (Sync)
-
- ```python
- from openai import OpenAI
- from texttools import TheTool
-
- # Create your OpenAI client
- client = OpenAI(base_url="your_url", api_key="your_api_key")
-
- # Specify the model
- model = "gpt-4o-mini"
-
- # Create an instance of TheTool
- the_tool = TheTool(client=client, model=model)
-
- # Example: Question Detection
- detection = the_tool.is_question("Is this project open source?", logprobs=True, top_logprobs=2)
- print(detection.result)
- print(detection.logprobs)
- # Output: True + logprobs
-
- # Example: Translation
- translation = the_tool.translate("سلام، حالت چطوره؟", target_language="English", with_analysis=True)
- print(translation.result)
- print(translation.analysis)
- # Output: "Hi! How are you?" + analysis
- ```
-
- ---
-
- ## ⚡ Quick Start (Async)
-
- ```python
- import asyncio
- from openai import AsyncOpenAI
- from texttools import AsyncTheTool
-
- async def main():
-     # Create your AsyncOpenAI client
-     async_client = AsyncOpenAI(base_url="your_url", api_key="your_api_key")
-
-     # Specify the model
-     model = "gpt-4o-mini"
-
-     # Create an instance of AsyncTheTool
-     async_the_tool = AsyncTheTool(client=async_client, model=model)
-
-     # Example: Async Translation and Keyword Extraction
-     translation_task = async_the_tool.translate("سلام، حالت چطوره؟", target_language="English")
-     keywords_task = async_the_tool.extract_keywords("Tomorrow, we will be dead by the car crash")
-
-     (translation, keywords) = await asyncio.gather(translation_task, keywords_task)
-     print(translation.result)
-     print(keywords.result)
-
- asyncio.run(main())
- ```
-
- ---
-
- ## 👍 Use Cases
-
- Use **TextTools** when you need to:
-
- - 🔍 **Classify** large datasets quickly without model training
- - 🌍 **Translate** and process multilingual corpora with ease
- - 🧩 **Integrate** LLMs into production pipelines (structured outputs)
- - 📊 **Analyze** large text collections using embeddings and categorization
-
- ---
-
- ## 🔍 Logging
-
- TextTools uses Python's standard `logging` module. The library's default logger level is `WARNING`; if you want to change it, follow the instructions below:
-
-
- ```python
- import logging
-
- # Default: warnings and errors only
- logging.basicConfig(level=logging.WARNING)
-
- # Debug everything (verbose)
- logging.basicConfig(level=logging.DEBUG)
-
- # Silence everything below CRITICAL
- logging.basicConfig(level=logging.CRITICAL)
- ```
-
- ---
-
- ## 📚 Batch Processing
-
- Process large datasets efficiently using OpenAI's batch API.
-
- ## ⚡ Quick Start (Batch)
-
- ```python
- from pydantic import BaseModel
- from texttools import BatchJobRunner, BatchConfig
-
- # Configure your batch job
- config = BatchConfig(
-     system_prompt="Extract entities from the text",
-     job_name="entity_extraction",
-     input_data_path="data.json",
-     output_data_filename="results.json",
-     model="gpt-4o-mini"
- )
-
- # Define your output schema
- class Output(BaseModel):
-     entities: list[str]
-
- # Run the batch job
- runner = BatchJobRunner(config, output_model=Output)
- runner.run()
- ```
-
- ---
-
- ## 🤝 Contributing
-
- Contributions are welcome!
- Feel free to **open issues, suggest new features, or submit pull requests**.
-
- ---
-
- ## 🌿 License
-
- This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
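
The removed README above documents `with_analysis`, `logprobs`, the `validator` retry mechanism and the `ToolOutput` fields, but never shows them together. Below is a minimal sketch, assuming the 1.1.16 `TheTool` API described in that README; the validator function and sample text are invented for illustration, and not every tool accepts every parameter:

```python
from openai import OpenAI
from texttools import TheTool

client = OpenAI(base_url="your_url", api_key="your_api_key")
the_tool = TheTool(client=client, model="gpt-4o-mini")

def has_keywords(result) -> bool:
    # Return True when the output is acceptable; False makes TheTool retry
    # with a modified temperature, up to max_validation_retries times.
    return bool(result)

output = the_tool.extract_keywords(
    "TextTools is a high-level NLP toolkit built on top of modern LLMs.",
    with_analysis=True,            # extra reasoning step (doubles token usage)
    logprobs=True, top_logprobs=2, # only if the model supports logprobs
    temperature=0.4,
    validator=has_keywords,
    max_validation_retries=3,
)

# ToolOutput fields described in the README above:
print(output.result)          # the LLM output
print(output.analysis)        # reasoning step (only when with_analysis=True)
print(output.logprobs)        # token-level probabilities, if available
print(output.process)         # name of the tool that produced the result
print(output.execution_time)  # seconds
print(output.errors)          # non-empty if something failed, e.g. validation
```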
@@ -1,31 +0,0 @@
- hamtaa_texttools-1.1.16.dist-info/licenses/LICENSE,sha256=Hb2YOBKy2MJQLnyLrX37B4ZVuac8eaIcE71SvVIMOLg,1082
- texttools/__init__.py,sha256=dc81lXGWP29k7oVvq2BMoMotz6lgiwX4PO2jHHBe2S8,317
- texttools/batch/batch_config.py,sha256=m1UgILVKjNdWE6laNbfbG4vgi4o2fEegGZbeoam6pnY,749
- texttools/batch/batch_runner.py,sha256=9e4SPLlvLHHs3U7bHkuuMVw8TFNwsGUzRjkAMKN4_ik,9378
- texttools/batch/internals/batch_manager.py,sha256=UoBe76vmFG72qrSaGKDZf4HzkykFBkkkbL9TLfV8TuQ,8730
- texttools/batch/internals/utils.py,sha256=F1_7YlVFKhjUROAFX4m0SaP8KiZVZyHRMIIB87VUGQc,373
- texttools/prompts/README.md,sha256=-5YO93CN93QLifqZpUeUnCOCBbDiOTV-cFQeJ7Gg0I4,1377
- texttools/prompts/categorize.yaml,sha256=F7VezB25B_sT5yoC25ezODBddkuDD5lUHKetSpx9FKI,2743
- texttools/prompts/detect_entity.yaml,sha256=1rhMkJOjxSQcT4j_c5SRcIm77AUdeG-rUmeidb6VOFc,981
- texttools/prompts/extract_entities.yaml,sha256=KiKjeDpHaeh3JVtZ6q1pa3k4DYucUIU9WnEcRTCA-SE,651
- texttools/prompts/extract_keywords.yaml,sha256=Vj4Tt3vT6LtpOo_iBZPo9oWI50oVdPGXe5i8yDR8ex4,3177
- texttools/prompts/is_question.yaml,sha256=d0-vKRbXWkxvO64ikvxRjEmpAXGpCYIPGhgexvPPjws,471
- texttools/prompts/merge_questions.yaml,sha256=0J85GvTirZB4ELwH3sk8ub_WcqqpYf6PrMKr3djlZeo,1792
- texttools/prompts/rewrite.yaml,sha256=LO7He_IA3MZKz8a-LxH9DHJpOjpYwaYN1pbjp1Y0tFo,5392
- texttools/prompts/run_custom.yaml,sha256=38OkCoVITbuuS9c08UZSP1jZW4WjSmRIi8fR0RAiPu4,108
- texttools/prompts/subject_to_question.yaml,sha256=C7x7rNNm6U_ZG9HOn6zuzYOtvJUZ2skuWbL1-aYdd3E,1147
- texttools/prompts/summarize.yaml,sha256=o6rxGPfWtZd61Duvm8NVvCJqfq73b-wAuMSKR6UYUqY,459
- texttools/prompts/text_to_question.yaml,sha256=UheKYpDn6iyKI8NxunHZtFpNyfCLZZe5cvkuXpurUJY,783
- texttools/prompts/translate.yaml,sha256=mGT2uBCei6uucWqVbs4silk-UV060v3G0jnt0P6sr50,634
- texttools/tools/async_tools.py,sha256=vNAg0gxwUZPsMS4q8JCv7RlYymS8l_5FsFI5adEYT7w,34376
- texttools/tools/sync_tools.py,sha256=hFifFa9YatvSeGif2E_bIG006eMdIBr6SV9HsZ_dAlg,34187
- texttools/tools/internals/async_operator.py,sha256=1TMr8e1qbE9GSz8jl0q3MKdM8lIYE-1ZuSxHjYPqKHI,7198
- texttools/tools/internals/formatters.py,sha256=tACNLP6PeoqaRpNudVxBaHA25zyWqWYPZQuYysIu88g,941
- texttools/tools/internals/models.py,sha256=2QnvMiijuSqOqpCl026848rJy_pHNbRoDESlQvcdHlk,5839
- texttools/tools/internals/operator_utils.py,sha256=w1k0RJ_W_CRbVc_J2w337VuL-opHpHiCxfhEOwtyuOo,1856
- texttools/tools/internals/prompt_loader.py,sha256=4g6-U8kqrGN7VpNaRcrBcnF-h03PXjUDBP0lL0_4EZY,1953
- texttools/tools/internals/sync_operator.py,sha256=4-V__o55Q8w29lWxkhG4St-exZLZTfBbiW76knOXbc0,7106
- hamtaa_texttools-1.1.16.dist-info/METADATA,sha256=DL-cjlGMv7bft8QVd-pn5E_tNDuPgQHkTKGl4YTosGw,9555
- hamtaa_texttools-1.1.16.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- hamtaa_texttools-1.1.16.dist-info/top_level.txt,sha256=5Mh0jIxxZ5rOXHGJ6Mp-JPKviywwN0MYuH0xk5bEWqE,10
- hamtaa_texttools-1.1.16.dist-info/RECORD,,
@@ -1,16 +0,0 @@
- from typing import Any
-
-
- def export_data(data) -> list[dict[str, str]]:
-     """
-     Produces a structure of the following form from an initial data structure:
-     [{"id": str, "text": str},...]
-     """
-     return data
-
-
- def import_data(data) -> Any:
-     """
-     Takes the output and aggregates it back into the original structure.
-     """
-     return data
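
The removed batch hooks above are identity pass-throughs; their docstrings describe the record shape the batch runner consumed. A small, hypothetical sketch of the `[{"id": str, "text": str}, ...]` structure `export_data()` was expected to produce (the input mapping is invented for illustration):

```python
# Hypothetical input data, for illustration only.
articles = {"a1": "Is this project open source?", "a2": "TextTools supports batch jobs."}

def export_data(data: dict[str, str]) -> list[dict[str, str]]:
    # Flatten an {id: text} mapping into the record list the removed docstring describes.
    return [{"id": key, "text": value} for key, value in data.items()]

print(export_data(articles))
# [{'id': 'a1', 'text': 'Is this project open source?'}, {'id': 'a2', 'text': 'TextTools supports batch jobs.'}]
```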
@@ -1,35 +0,0 @@
- # Prompts
-
- ## Overview
- This folder contains YAML files for all prompts used in the project. Each file represents a separate prompt template, which can be loaded by tools or scripts that require structured prompts for AI models.
-
- ---
-
- ## Structure
- - **prompt_file.yaml**: Each YAML file represents a single prompt template.
- - **main_template**: The main instruction template for the model.
- - **analyze_template** (optional): A secondary reasoning template used before generating the final response.
- - **Modes** (optional): Some prompts may have multiple modes (e.g., `default`, `reason`) to allow different behaviors.
-
- ### Example YAML Structure
- ```yaml
- main_template:
-   mode_1: |
-     Your main instructions here with placeholders like {input}.
-   mode_2: |
-     Optional reasoning instructions here.
-
- analyze_template:
-   mode_1: |
-     Analyze and summarize the input.
-   mode_2: |
-     Optional detailed analysis template.
- ```
-
- ---
-
- ## Guidelines
- 1. **Naming**: Use descriptive names for each YAML file corresponding to the tool or task it serves.
- 2. **Placeholders**: Use `{input}` or other relevant placeholders to dynamically inject data.
- 3. **Modes**: If using modes, ensure both `main_template` and `analyze_template` contain the corresponding keys.
- 4. **Consistency**: Keep formatting consistent across files for easier parsing by scripts.
@@ -1,22 +0,0 @@
- main_template: |
-   You are an expert Named Entity Recognition (NER) system. Extract entities from the text.
-   The output must strictly follow the provided Pydantic schema.
-
-   Mapping Rule:
-   - Person: شخص
-   - Location: مکان
-   - Time: زمان
-   - Living Beings: موجود زنده
-   - Organization: سازمان
-   - Concept: مفهوم
-
-   CRITICAL:
-   1. The final output structure must be a complete JSON object matching the Pydantic schema (List[Entity]).
-   2. Both the extracted text and the type must be in Persian, using the exact mapping provided above.
-
-   Here is the text: {input}
-
- analyze_template: |
-   Analyze the following text to identify all potential named entities and their categories (Person, Location, Time, Living Beings, Organization, Concept).
-   Provide a brief summary of the entities identified that will help the main process to extract them accurately and apply the correct Persian type label.
-   Here is the text: {input}
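
The removed prompt above expects output matching a `List[Entity]` Pydantic schema that lived in `texttools/tools/internals/models.py` (not shown in this diff). A hypothetical sketch of what such a schema could look like, with field names invented for illustration:

```python
from pydantic import BaseModel

class Entity(BaseModel):
    # Hypothetical stand-in for the Entity model referenced by the prompt;
    # the real definition is not included in this diff.
    text: str  # extracted span, in Persian per the prompt's CRITICAL rules
    type: str  # one of the Persian labels from the Mapping Rule, e.g. "شخص"

class EntityList(BaseModel):
    result: list[Entity]
```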
@@ -1,200 +0,0 @@
- from typing import Any, TypeVar, Type
- from collections.abc import Callable
- import logging
-
- from openai import AsyncOpenAI
- from pydantic import BaseModel
-
- from texttools.tools.internals.models import ToolOutput
- from texttools.tools.internals.operator_utils import OperatorUtils
- from texttools.tools.internals.formatters import Formatter
- from texttools.tools.internals.prompt_loader import PromptLoader
-
- # Base Model type for output models
- T = TypeVar("T", bound=BaseModel)
-
- logger = logging.getLogger("texttools.async_operator")
-
-
- class AsyncOperator:
-     """
-     Core engine for running text-processing operations with an LLM (Async).
-
-     It wires together:
-     - `PromptLoader` → loads YAML prompt templates.
-     - `Formatter` → applies formatting to messages (e.g., merging consecutive user turns).
-     - AsyncOpenAI client → executes completions/parsed completions.
-     """
-
-     def __init__(self, client: AsyncOpenAI, model: str):
-         self._client = client
-         self._model = model
-
-     async def _analyze(self, prompt_configs: dict[str, str], temperature: float) -> str:
-         """
-         Calls OpenAI API for analysis using the configured prompt template.
-         Returns the analyzed content as a string.
-         """
-         analyze_prompt = prompt_configs["analyze_template"]
-         analyze_message = [OperatorUtils.build_user_message(analyze_prompt)]
-         completion = await self._client.chat.completions.create(
-             model=self._model,
-             messages=analyze_message,
-             temperature=temperature,
-         )
-         analysis = completion.choices[0].message.content.strip()
-         return analysis
-
-     async def _parse_completion(
-         self,
-         message: list[dict[str, str]],
-         output_model: Type[T],
-         temperature: float,
-         logprobs: bool = False,
-         top_logprobs: int = 3,
-         priority: int | None = 0,
-     ) -> tuple[T, Any]:
-         """
-         Parses a chat completion using OpenAI's structured output format.
-         Returns both the parsed object and the raw completion for logprobs.
-         """
-         request_kwargs = {
-             "model": self._model,
-             "messages": message,
-             "response_format": output_model,
-             "temperature": temperature,
-         }
-
-         if logprobs:
-             request_kwargs["logprobs"] = True
-             request_kwargs["top_logprobs"] = top_logprobs
-         if priority:
-             request_kwargs["extra_body"] = {"priority": priority}
-         completion = await self._client.beta.chat.completions.parse(**request_kwargs)
-         parsed = completion.choices[0].message.parsed
-         return parsed, completion
-
-     async def run(
-         self,
-         # User parameters
-         text: str,
-         with_analysis: bool,
-         output_lang: str | None,
-         user_prompt: str | None,
-         temperature: float,
-         logprobs: bool,
-         top_logprobs: int | None,
-         validator: Callable[[Any], bool] | None,
-         max_validation_retries: int | None,
-         # Internal parameters
-         prompt_file: str,
-         output_model: Type[T],
-         mode: str | None,
-         priority: int | None = 0,
-         **extra_kwargs,
-     ) -> ToolOutput:
-         """
-         Execute the async LLM pipeline with the given input text. (Async)
-         """
-         prompt_loader = PromptLoader()
-         formatter = Formatter()
-         output = ToolOutput()
-
-         try:
-             # Prompt configs contain two keys, main_template and analyze_template; both are strings
-             prompt_configs = prompt_loader.load(
-                 prompt_file=prompt_file,
-                 text=text.strip(),
-                 mode=mode,
-                 **extra_kwargs,
-             )
-
-             messages = []
-
-             if with_analysis:
-                 analysis = await self._analyze(prompt_configs, temperature)
-                 messages.append(
-                     OperatorUtils.build_user_message(
-                         f"Based on this analysis: {analysis}"
-                     )
-                 )
-
-             if output_lang:
-                 messages.append(
-                     OperatorUtils.build_user_message(
-                         f"Respond only in the {output_lang} language."
-                     )
-                 )
-
-             if user_prompt:
-                 messages.append(
-                     OperatorUtils.build_user_message(
-                         f"Consider this instruction {user_prompt}"
-                     )
-                 )
-
-             messages.append(
-                 OperatorUtils.build_user_message(prompt_configs["main_template"])
-             )
-
-             messages = formatter.user_merge_format(messages)
-
-             parsed, completion = await self._parse_completion(
-                 messages, output_model, temperature, logprobs, top_logprobs, priority
-             )
-
-             output.result = parsed.result
-
-             # Retry logic if validation fails
-             if validator and not validator(output.result):
-                 for attempt in range(max_validation_retries):
-                     logger.warning(
-                         f"Validation failed, retrying for the {attempt + 1} time."
-                     )
-
-                     # Generate new temperature for retry
-                     retry_temperature = OperatorUtils.get_retry_temp(temperature)
-                     try:
-                         parsed, completion = await self._parse_completion(
-                             messages,
-                             output_model,
-                             retry_temperature,
-                             logprobs,
-                             top_logprobs,
-                         )
-
-                         output.result = parsed.result
-
-                         # Check if retry was successful
-                         if validator(output.result):
-                             logger.info(
-                                 f"Validation passed on retry attempt {attempt + 1}"
-                             )
-                             break
-                         else:
-                             logger.warning(
-                                 f"Validation still failing after retry attempt {attempt + 1}"
-                             )
-
-                     except Exception as e:
-                         logger.error(f"Retry attempt {attempt + 1} failed: {e}")
-                         # Continue to next retry attempt if this one fails
-
-             # Final check after all retries
-             if validator and not validator(output.result):
-                 output.errors.append("Validation failed after all retry attempts")
-
-             if logprobs:
-                 output.logprobs = OperatorUtils.extract_logprobs(completion)
-
-             if with_analysis:
-                 output.analysis = analysis
-
-             output.process = prompt_file[:-5]
-
-             return output
-
-         except Exception as e:
-             logger.error(f"AsyncTheTool failed: {e}")
-             output.errors.append(str(e))
-             return output
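
For context, a sketch of how this internal engine could be driven directly, assuming the 1.1.16 import path from the RECORD above and an output model exposing the `result` field that `run()` copies into `ToolOutput`. In practice `AsyncTheTool` wraps this class, so the call below is illustrative only:

```python
import asyncio
from openai import AsyncOpenAI
from pydantic import BaseModel
from texttools.tools.internals.async_operator import AsyncOperator  # 1.1.16 path

class Summary(BaseModel):
    result: str  # run() copies parsed.result into ToolOutput.result

async def main() -> None:
    client = AsyncOpenAI(base_url="your_url", api_key="your_api_key")
    operator = AsyncOperator(client=client, model="gpt-4o-mini")
    output = await operator.run(
        text="TextTools is a high-level NLP toolkit built on top of modern LLMs.",
        with_analysis=False,
        output_lang="English",
        user_prompt=None,
        temperature=0.2,
        logprobs=False,
        top_logprobs=None,
        validator=None,
        max_validation_retries=None,
        prompt_file="summarize.yaml",  # one of the prompt files listed in the RECORD
        output_model=Summary,
        mode=None,
    )
    print(output.result, output.errors)

asyncio.run(main())
```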
@@ -1,24 +0,0 @@
- class Formatter:
-     @staticmethod
-     def user_merge_format(messages: list[dict[str, str]]) -> list[dict[str, str]]:
-         """
-         Merges consecutive user messages into a single message, separated by newlines.
-
-         This is useful for condensing a multi-turn user input into a single
-         message for the LLM. Assistant and system messages are left unchanged and
-         act as separators between user message groups.
-         """
-         merged: list[dict[str, str]] = []
-
-         for message in messages:
-             role, content = message["role"], message["content"].strip()
-
-             # Merge with previous user turn
-             if merged and role == "user" and merged[-1]["role"] == "user":
-                 merged[-1]["content"] += "\n" + content
-
-             # Otherwise, start a new turn
-             else:
-                 merged.append({"role": role, "content": content})
-
-         return merged
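
A short worked example (not part of the package) of what `user_merge_format` does to consecutive user turns, assuming the 1.1.16 import path from the RECORD above:

```python
from texttools.tools.internals.formatters import Formatter  # 1.1.16 path

merged = Formatter.user_merge_format([
    {"role": "user", "content": "Respond only in the English language."},
    {"role": "user", "content": "Summarize this paragraph."},
    {"role": "assistant", "content": "Sure."},
    {"role": "user", "content": "Now translate it."},
])
# Consecutive user turns are joined with newlines; the assistant turn stays put:
# [{"role": "user", "content": "Respond only in the English language.\nSummarize this paragraph."},
#  {"role": "assistant", "content": "Sure."},
#  {"role": "user", "content": "Now translate it."}]
```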