hamtaa-texttools 1.0.2__tar.gz → 1.1.20__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63) hide show
  1. {hamtaa_texttools-1.0.2 → hamtaa_texttools-1.1.20}/LICENSE +20 -20
  2. {hamtaa_texttools-1.0.2 → hamtaa_texttools-1.1.20}/MANIFEST.in +2 -2
  3. hamtaa_texttools-1.1.20/PKG-INFO +286 -0
  4. hamtaa_texttools-1.1.20/README.md +251 -0
  5. hamtaa_texttools-1.1.20/hamtaa_texttools.egg-info/PKG-INFO +286 -0
  6. hamtaa_texttools-1.1.20/hamtaa_texttools.egg-info/SOURCES.txt +40 -0
  7. {hamtaa_texttools-1.0.2 → hamtaa_texttools-1.1.20}/hamtaa_texttools.egg-info/dependency_links.txt +0 -0
  8. hamtaa_texttools-1.1.20/hamtaa_texttools.egg-info/requires.txt +3 -0
  9. {hamtaa_texttools-1.0.2 → hamtaa_texttools-1.1.20}/hamtaa_texttools.egg-info/top_level.txt +0 -0
  10. {hamtaa_texttools-1.0.2 → hamtaa_texttools-1.1.20}/pyproject.toml +34 -32
  11. {hamtaa_texttools-1.0.2 → hamtaa_texttools-1.1.20}/setup.cfg +4 -4
  12. hamtaa_texttools-1.1.20/tests/test_all_async_tools.py +99 -0
  13. hamtaa_texttools-1.1.20/tests/test_all_tools.py +119 -0
  14. hamtaa_texttools-1.1.20/tests/test_output_validation.py +35 -0
  15. hamtaa_texttools-1.1.20/texttools/__init__.py +7 -0
  16. hamtaa_texttools-1.1.20/texttools/batch/batch_config.py +26 -0
  17. hamtaa_texttools-1.1.20/texttools/batch/batch_runner.py +228 -0
  18. {hamtaa_texttools-1.0.2/texttools/utils/batch_manager → hamtaa_texttools-1.1.20/texttools/batch/internals}/batch_manager.py +228 -240
  19. hamtaa_texttools-1.1.20/texttools/batch/internals/utils.py +13 -0
  20. hamtaa_texttools-1.1.20/texttools/internals/async_operator.py +246 -0
  21. hamtaa_texttools-1.1.20/texttools/internals/exceptions.py +28 -0
  22. hamtaa_texttools-1.1.20/texttools/internals/models.py +181 -0
  23. hamtaa_texttools-1.1.20/texttools/internals/operator_utils.py +78 -0
  24. hamtaa_texttools-1.1.20/texttools/internals/prompt_loader.py +108 -0
  25. hamtaa_texttools-1.1.20/texttools/internals/sync_operator.py +244 -0
  26. hamtaa_texttools-1.1.20/texttools/internals/text_to_chunks.py +97 -0
  27. {hamtaa_texttools-1.0.2 → hamtaa_texttools-1.1.20}/texttools/prompts/README.md +35 -31
  28. hamtaa_texttools-1.1.20/texttools/prompts/categorize.yaml +77 -0
  29. hamtaa_texttools-1.1.20/texttools/prompts/check_fact.yaml +19 -0
  30. hamtaa_texttools-1.0.2/texttools/prompts/ner_extractor.yaml → hamtaa_texttools-1.1.20/texttools/prompts/extract_entities.yaml +20 -18
  31. hamtaa_texttools-1.1.20/texttools/prompts/extract_keywords.yaml +68 -0
  32. hamtaa_texttools-1.0.2/texttools/prompts/question_detector.yaml → hamtaa_texttools-1.1.20/texttools/prompts/is_question.yaml +13 -11
  33. hamtaa_texttools-1.0.2/texttools/prompts/question_merger.yaml → hamtaa_texttools-1.1.20/texttools/prompts/merge_questions.yaml +45 -46
  34. hamtaa_texttools-1.1.20/texttools/prompts/propositionize.yaml +22 -0
  35. hamtaa_texttools-1.1.20/texttools/prompts/rewrite.yaml +111 -0
  36. hamtaa_texttools-1.1.20/texttools/prompts/run_custom.yaml +7 -0
  37. hamtaa_texttools-1.0.2/texttools/prompts/subject_question_generator.yaml → hamtaa_texttools-1.1.20/texttools/prompts/subject_to_question.yaml +22 -27
  38. hamtaa_texttools-1.0.2/texttools/prompts/summarizer.yaml → hamtaa_texttools-1.1.20/texttools/prompts/summarize.yaml +13 -10
  39. hamtaa_texttools-1.1.20/texttools/prompts/text_to_question.yaml +22 -0
  40. hamtaa_texttools-1.0.2/texttools/prompts/translator.yaml → hamtaa_texttools-1.1.20/texttools/prompts/translate.yaml +14 -13
  41. hamtaa_texttools-1.1.20/texttools/tools/async_tools.py +1198 -0
  42. hamtaa_texttools-1.1.20/texttools/tools/sync_tools.py +1198 -0
  43. hamtaa_texttools-1.0.2/PKG-INFO +0 -129
  44. hamtaa_texttools-1.0.2/README.md +0 -95
  45. hamtaa_texttools-1.0.2/hamtaa_texttools.egg-info/PKG-INFO +0 -129
  46. hamtaa_texttools-1.0.2/hamtaa_texttools.egg-info/SOURCES.txt +0 -33
  47. hamtaa_texttools-1.0.2/hamtaa_texttools.egg-info/requires.txt +0 -2
  48. hamtaa_texttools-1.0.2/tests/test_tools.py +0 -65
  49. hamtaa_texttools-1.0.2/texttools/__init__.py +0 -9
  50. hamtaa_texttools-1.0.2/texttools/formatters/base_formatter.py +0 -33
  51. hamtaa_texttools-1.0.2/texttools/formatters/user_merge_formatter/user_merge_formatter.py +0 -47
  52. hamtaa_texttools-1.0.2/texttools/prompts/categorizer.yaml +0 -25
  53. hamtaa_texttools-1.0.2/texttools/prompts/keyword_extractor.yaml +0 -11
  54. hamtaa_texttools-1.0.2/texttools/prompts/question_generator.yaml +0 -22
  55. hamtaa_texttools-1.0.2/texttools/prompts/question_rewriter.yaml +0 -44
  56. hamtaa_texttools-1.0.2/texttools/tools/__init__.py +0 -3
  57. hamtaa_texttools-1.0.2/texttools/tools/operator.py +0 -236
  58. hamtaa_texttools-1.0.2/texttools/tools/output_models.py +0 -54
  59. hamtaa_texttools-1.0.2/texttools/tools/prompt_loader.py +0 -84
  60. hamtaa_texttools-1.0.2/texttools/tools/the_tool.py +0 -291
  61. hamtaa_texttools-1.0.2/texttools/utils/__init__.py +0 -4
  62. hamtaa_texttools-1.0.2/texttools/utils/batch_manager/__init__.py +0 -4
  63. hamtaa_texttools-1.0.2/texttools/utils/batch_manager/batch_runner.py +0 -212
@@ -1,21 +1,21 @@
1
- MIT License
2
-
3
- Copyright (c) 2025 Hamtaa
4
-
5
- Permission is hereby granted, free of charge, to any person obtaining a copy
6
- of this software and associated documentation files (the "Software"), to deal
7
- in the Software without restriction, including without limitation the rights
8
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
- copies of the Software, and to permit persons to whom the Software is
10
- furnished to do so, subject to the following conditions:
11
-
12
- The above copyright notice and this permission notice shall be included in all
13
- copies or substantial portions of the Software.
14
-
15
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Hamtaa
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
21
  SOFTWARE.
@@ -1,2 +1,2 @@
1
- graft texttools/prompts
2
- global-exclude *.pyc
1
+ graft texttools/prompts
2
+ global-exclude *.pyc
@@ -0,0 +1,286 @@
1
+ Metadata-Version: 2.4
2
+ Name: hamtaa-texttools
3
+ Version: 1.1.20
4
+ Summary: A high-level NLP toolkit built on top of modern LLMs.
5
+ Author-email: Tohidi <the.mohammad.tohidi@gmail.com>, Montazer <montazerh82@gmail.com>, Givechi <mohamad.m.givechi@gmail.com>, MoosaviNejad <erfanmoosavi84@gmail.com>, Zareshahi <a.zareshahi1377@gmail.com>
6
+ License: MIT License
7
+
8
+ Copyright (c) 2025 Hamtaa
9
+
10
+ Permission is hereby granted, free of charge, to any person obtaining a copy
11
+ of this software and associated documentation files (the "Software"), to deal
12
+ in the Software without restriction, including without limitation the rights
13
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14
+ copies of the Software, and to permit persons to whom the Software is
15
+ furnished to do so, subject to the following conditions:
16
+
17
+ The above copyright notice and this permission notice shall be included in all
18
+ copies or substantial portions of the Software.
19
+
20
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
23
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26
+ SOFTWARE.
27
+ Keywords: nlp,llm,text-processing,openai
28
+ Requires-Python: >=3.8
29
+ Description-Content-Type: text/markdown
30
+ License-File: LICENSE
31
+ Requires-Dist: openai==1.97.1
32
+ Requires-Dist: pydantic>=2.0.0
33
+ Requires-Dist: pyyaml>=6.0
34
+ Dynamic: license-file
35
+
36
+ # TextTools
37
+
38
+ ## 📌 Overview
39
+
40
+ **TextTools** is a high-level **NLP toolkit** built on top of modern **LLMs**.
41
+
42
+ It provides both **sync (`TheTool`)** and **async (`AsyncTheTool`)** APIs for maximum flexibility.
43
+
44
+ It provides ready-to-use utilities for **translation, question detection, keyword extraction, categorization, NER extraction, and more** - designed to help you integrate AI-powered text processing into your applications with minimal effort.
45
+
46
+ ---
47
+
48
+ ## ✨ Features
49
+
50
+ TextTools provides a rich collection of high-level NLP utilities,
51
+ Each tool is designed to work with structured outputs (JSON / Pydantic).
52
+
53
+ - **`categorize()`** - Classifies text into given categories (You have to create a category tree)
54
+ - **`extract_keywords()`** - Extracts keywords from text
55
+ - **`extract_entities()`** - Named Entity Recognition (NER) system
56
+ - **`is_question()`** - Binary detection of whether input is a question
57
+ - **`text_to_question()`** - Generates questions from text
58
+ - **`merge_questions()`** - Merges multiple questions with different modes
59
+ - **`rewrite()`** - Rewrites text with different wording/meaning
60
+ - **`subject_to_question()`** - Generates questions about a specific subject
61
+ - **`summarize()`** - Text summarization
62
+ - **`translate()`** - Text translation between languages
63
+ - **`propositionize()`** - Converts text into atomic, independent, meaningful sentences
64
+ - **`check_fact()`** - Checks whether a statement is relevant to the source text
65
+ - **`run_custom()`** - Allows users to define a custom tool with an arbitrary BaseModel
66
+
67
+ ---
68
+
69
+ ## 📊 Tool Quality Tiers
70
+
71
+ | Status | Meaning | Use in Production? |
72
+ |--------|---------|-------------------|
73
+ | **✅ Production** | Evaluated, tested, stable. | **Yes** - ready for reliable use. |
74
+ | **🧪 Experimental** | Added to the package but **not fully evaluated**. Functional, but quality may vary. | **Use with caution** - outputs not yet validated. |
75
+
76
+ ### Current Status
77
+ **Production Tools:**
78
+ - `categorize()` (list mode)
79
+ - `extract_keywords()`
80
+ - `extract_entities()`
81
+ - `is_question()`
82
+ - `text_to_question()`
83
+ - `merge_questions()`
84
+ - `rewrite()`
85
+ - `subject_to_question()`
86
+ - `summarize()`
87
+ - `run_custom()` (fine in most cases)
88
+
89
+ **Experimental Tools:**
90
+ - `categorize()` (tree mode)
91
+ - `translate()`
92
+ - `propositionize()`
93
+ - `check_fact()`
94
+ - `run_custom()` (not evaluated in all scenarios)
95
+
96
+ ---
97
+
98
+ ## ⚙️ `with_analysis`, `logprobs`, `output_lang`, `user_prompt`, `temperature`, `validator` and `priority` parameters
99
+
100
+ TextTools provides several optional flags to customize LLM behavior:
101
+
102
+ - **`with_analysis: bool`** → Adds a reasoning step before generating the final output.
103
+ **Note:** This doubles token usage per call because it triggers an additional LLM request.
104
+
105
+ - **`logprobs: bool`** → Returns token-level probabilities for the generated output. You can also specify `top_logprobs=<N>` to get the top N alternative tokens and their probabilities.
106
+ **Note:** This feature works if it's supported by the model.
107
+
108
+ - **`output_lang: str`** → Forces the model to respond in a specific language. The model will ignore other instructions about language and respond strictly in the requested language.
109
+
110
+ - **`user_prompt: str`** → Allows you to inject a custom instruction or prompt into the model alongside the main template. This gives you fine-grained control over how the model interprets or modifies the input text.
111
+
112
+ - **`temperature: float`** → Determines how creative the model should respond. Takes a float number from `0.0` to `2.0`.
113
+
114
+ - **`validator: Callable (Experimental)`** → Forces TheTool to validate the output result based on your custom validator. Validator should return a bool (True if there were no problem, False if the validation fails.) If the validator fails, TheTool will retry to get another output by modifying `temperature`. You can specify `max_validation_retries=<N>` to change the number of retries.
115
+
116
+ - **`priority: int (Experimental)`** → Task execution priority level. Higher values = higher priority. Affects processing order in queues.
117
+ **Note:** This feature works if it's supported by the model and vLLM.
118
+
119
+ **Note:** There might be some tools that don't support some of the parameters above.
120
+
121
+ ---
122
+
123
+ ## 🧩 ToolOutput
124
+
125
+ Every tool of `TextTools` returns a `ToolOutput` object which is a BaseModel with attributes:
126
+ - **`result: Any`** → The output of LLM
127
+ - **`analysis: str`** → The reasoning step before generating the final output
128
+ - **`logprobs: list`** → Token-level probabilities for the generated output
129
+ - **`process: str`** → The tool name which processed the input
130
+ - **`processed_at: datetime`** → The process time
131
+ - **`execution_time: float`** → The execution time (seconds)
132
+ - **`errors: list[str]`** → Any errors that occurred while calling the LLM
133
+
134
+ **Note:** You can use `repr(ToolOutput)` to see details of your ToolOutput.
135
+
136
+ ---
137
+
138
+ ## 🚀 Installation
139
+
140
+ Install the latest release via PyPI:
141
+
142
+ ```bash
143
+ pip install -U hamtaa-texttools
144
+ ```
145
+
146
+ ---
147
+
148
+ ## 🧨 Sync vs Async
149
+ | Tool | Style | Use case |
150
+ |--------------|---------|---------------------------------------------|
151
+ | `TheTool` | Sync | Simple scripts, sequential workflows |
152
+ | `AsyncTheTool` | Async | High-throughput apps, APIs, concurrent tasks |
153
+
154
+ ---
155
+
156
+ ## ⚡ Quick Start (Sync)
157
+
158
+ ```python
159
+ from openai import OpenAI
160
+ from texttools import TheTool
161
+
162
+ # Create your OpenAI client
163
+ client = OpenAI(base_url="your_url", api_key="your_api_key")
164
+
165
+ # Specify the model
166
+ model = "gpt-4o-mini"
167
+
168
+ # Create an instance of TheTool
169
+ the_tool = TheTool(client=client, model=model)
170
+
171
+ # Example: Question Detection
172
+ detection = the_tool.is_question("Is this project open source?", logprobs=True, top_logprobs=2)
173
+ print(detection.result)
174
+ print(detection.logprobs)
175
+ # Output: True + logprobs
176
+
177
+ # Example: Translation
178
+ translation = the_tool.translate("سلام، حالت چطوره؟", target_language="English", with_analysis=True)
179
+ print(translation.result)
180
+ print(translation.analysis)
181
+ # Output: "Hi! How are you?" + analysis
182
+ ```
183
+
184
+ ---
185
+
186
+ ## ⚡ Quick Start (Async)
187
+
188
+ ```python
189
+ import asyncio
190
+ from openai import AsyncOpenAI
191
+ from texttools import AsyncTheTool
192
+
193
+ async def main():
194
+ # Create your AsyncOpenAI client
195
+ async_client = AsyncOpenAI(base_url="your_url", api_key="your_api_key")
196
+
197
+ # Specify the model
198
+ model = "gpt-4o-mini"
199
+
200
+ # Create an instance of AsyncTheTool
201
+ async_the_tool = AsyncTheTool(client=async_client, model=model)
202
+
203
+ # Example: Async Translation and Keyword Extraction
204
+ translation_task = async_the_tool.translate("سلام، حالت چطوره؟", target_language="English")
205
+ keywords_task = async_the_tool.extract_keywords("Tomorrow, we will be dead by the car crash")
206
+
207
+ (translation, keywords) = await asyncio.gather(translation_task, keywords_task)
208
+ print(translation.result)
209
+ print(keywords.result)
210
+
211
+ asyncio.run(main())
212
+ ```
213
+
214
+ ---
215
+
216
+ ## 👍 Use Cases
217
+
218
+ Use **TextTools** when you need to:
219
+
220
+ - 🔍 **Classify** large datasets quickly without model training
221
+ - 🌍 **Translate** and process multilingual corpora with ease
222
+ - 🧩 **Integrate** LLMs into production pipelines (structured outputs)
223
+ - 📊 **Analyze** large text collections using embeddings and categorization
224
+
225
+ ---
226
+
227
+ ## 🔍 Logging
228
+
229
+ TextTools uses Python's standard `logging` module. The library's default logger level is `WARNING`, so if you want to modify it, follow instructions:
230
+
231
+
232
+ ```python
233
+ import logging
234
+
235
+ # Default: warnings and errors only
236
+ logging.basicConfig(level=logging.WARNING)
237
+
238
+ # Debug everything (verbose)
239
+ logging.basicConfig(level=logging.DEBUG)
240
+
241
+ # Complete silence
242
+ logging.basicConfig(level=logging.CRITICAL)
243
+ ```
244
+
245
+ ---
246
+
247
+ ## 📚 Batch Processing
248
+
249
+ Process large datasets efficiently using OpenAI's batch API.
250
+
251
+ ## ⚡ Quick Start (Batch)
252
+
253
+ ```python
254
+ from pydantic import BaseModel
255
+ from texttools import BatchJobRunner, BatchConfig
256
+
257
+ # Configure your batch job
258
+ config = BatchConfig(
259
+ system_prompt="Extract entities from the text",
260
+ job_name="entity_extraction",
261
+ input_data_path="data.json",
262
+ output_data_filename="results.json",
263
+ model="gpt-4o-mini"
264
+ )
265
+
266
+ # Define your output schema
267
+ class Output(BaseModel):
268
+ entities: list[str]
269
+
270
+ # Run the batch job
271
+ runner = BatchJobRunner(config, output_model=Output)
272
+ runner.run()
273
+ ```
274
+
275
+ ---
276
+
277
+ ## 🤝 Contributing
278
+
279
+ Contributions are welcome!
280
+ Feel free to **open issues, suggest new features, or submit pull requests**.
281
+
282
+ ---
283
+
284
+ ## 🌿 License
285
+
286
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
@@ -0,0 +1,251 @@
1
+ # TextTools
2
+
3
+ ## 📌 Overview
4
+
5
+ **TextTools** is a high-level **NLP toolkit** built on top of modern **LLMs**.
6
+
7
+ It provides both **sync (`TheTool`)** and **async (`AsyncTheTool`)** APIs for maximum flexibility.
8
+
9
+ It provides ready-to-use utilities for **translation, question detection, keyword extraction, categorization, NER extraction, and more** - designed to help you integrate AI-powered text processing into your applications with minimal effort.
10
+
11
+ ---
12
+
13
+ ## ✨ Features
14
+
15
+ TextTools provides a rich collection of high-level NLP utilities,
16
+ Each tool is designed to work with structured outputs (JSON / Pydantic).
17
+
18
+ - **`categorize()`** - Classifies text into given categories (You have to create a category tree)
19
+ - **`extract_keywords()`** - Extracts keywords from text
20
+ - **`extract_entities()`** - Named Entity Recognition (NER) system
21
+ - **`is_question()`** - Binary detection of whether input is a question
22
+ - **`text_to_question()`** - Generates questions from text
23
+ - **`merge_questions()`** - Merges multiple questions with different modes
24
+ - **`rewrite()`** - Rewrites text with different wording/meaning
25
+ - **`subject_to_question()`** - Generates questions about a specific subject
26
+ - **`summarize()`** - Text summarization
27
+ - **`translate()`** - Text translation between languages
28
+ - **`propositionize()`** - Converts text into atomic, independent, meaningful sentences
29
+ - **`check_fact()`** - Checks whether a statement is relevant to the source text
30
+ - **`run_custom()`** - Allows users to define a custom tool with an arbitrary BaseModel
31
+
32
+ ---
33
+
34
+ ## 📊 Tool Quality Tiers
35
+
36
+ | Status | Meaning | Use in Production? |
37
+ |--------|---------|-------------------|
38
+ | **✅ Production** | Evaluated, tested, stable. | **Yes** - ready for reliable use. |
39
+ | **🧪 Experimental** | Added to the package but **not fully evaluated**. Functional, but quality may vary. | **Use with caution** - outputs not yet validated. |
40
+
41
+ ### Current Status
42
+ **Production Tools:**
43
+ - `categorize()` (list mode)
44
+ - `extract_keywords()`
45
+ - `extract_entities()`
46
+ - `is_question()`
47
+ - `text_to_question()`
48
+ - `merge_questions()`
49
+ - `rewrite()`
50
+ - `subject_to_question()`
51
+ - `summarize()`
52
+ - `run_custom()` (fine in most cases)
53
+
54
+ **Experimental Tools:**
55
+ - `categorize()` (tree mode)
56
+ - `translate()`
57
+ - `propositionize()`
58
+ - `check_fact()`
59
+ - `run_custom()` (not evaluated in all scenarios)
60
+
61
+ ---
62
+
63
+ ## ⚙️ `with_analysis`, `logprobs`, `output_lang`, `user_prompt`, `temperature`, `validator` and `priority` parameters
64
+
65
+ TextTools provides several optional flags to customize LLM behavior:
66
+
67
+ - **`with_analysis: bool`** → Adds a reasoning step before generating the final output.
68
+ **Note:** This doubles token usage per call because it triggers an additional LLM request.
69
+
70
+ - **`logprobs: bool`** → Returns token-level probabilities for the generated output. You can also specify `top_logprobs=<N>` to get the top N alternative tokens and their probabilities.
71
+ **Note:** This feature works if it's supported by the model.
72
+
73
+ - **`output_lang: str`** → Forces the model to respond in a specific language. The model will ignore other instructions about language and respond strictly in the requested language.
74
+
75
+ - **`user_prompt: str`** → Allows you to inject a custom instruction or prompt into the model alongside the main template. This gives you fine-grained control over how the model interprets or modifies the input text.
76
+
77
+ - **`temperature: float`** → Determines how creative the model should respond. Takes a float number from `0.0` to `2.0`.
78
+
79
+ - **`validator: Callable (Experimental)`** → Forces TheTool to validate the output result based on your custom validator. Validator should return a bool (True if there were no problem, False if the validation fails.) If the validator fails, TheTool will retry to get another output by modifying `temperature`. You can specify `max_validation_retries=<N>` to change the number of retries.
80
+
81
+ - **`priority: int (Experimental)`** → Task execution priority level. Higher values = higher priority. Affects processing order in queues.
82
+ **Note:** This feature works if it's supported by the model and vLLM.
83
+
84
+ **Note:** There might be some tools that don't support some of the parameters above.
85
+
86
+ ---
87
+
88
+ ## 🧩 ToolOutput
89
+
90
+ Every tool of `TextTools` returns a `ToolOutput` object which is a BaseModel with attributes:
91
+ - **`result: Any`** → The output of LLM
92
+ - **`analysis: str`** → The reasoning step before generating the final output
93
+ - **`logprobs: list`** → Token-level probabilities for the generated output
94
+ - **`process: str`** → The tool name which processed the input
95
+ - **`processed_at: datetime`** → The process time
96
+ - **`execution_time: float`** → The execution time (seconds)
97
+ - **`errors: list[str]`** → Any error that have occured during calling LLM
98
+
99
+ **Note:** You can use `repr(ToolOutput)` to see details of your ToolOutput.
100
+
101
+ ---
102
+
103
+ ## 🚀 Installation
104
+
105
+ Install the latest release via PyPI:
106
+
107
+ ```bash
108
+ pip install -U hamtaa-texttools
109
+ ```
110
+
111
+ ---
112
+
113
+ ## 🧨 Sync vs Async
114
+ | Tool | Style | Use case |
115
+ |--------------|---------|---------------------------------------------|
116
+ | `TheTool` | Sync | Simple scripts, sequential workflows |
117
+ | `AsyncTheTool` | Async | High-throughput apps, APIs, concurrent tasks |
118
+
119
+ ---
120
+
121
+ ## ⚡ Quick Start (Sync)
122
+
123
+ ```python
124
+ from openai import OpenAI
125
+ from texttools import TheTool
126
+
127
+ # Create your OpenAI client
128
+ client = OpenAI(base_url = "your_url", API_KEY = "your_api_key")
129
+
130
+ # Specify the model
131
+ model = "gpt-4o-mini"
132
+
133
+ # Create an instance of TheTool
134
+ the_tool = TheTool(client=client, model=model)
135
+
136
+ # Example: Question Detection
137
+ detection = the_tool.is_question("Is this project open source?", logprobs=True, top_logprobs=2)
138
+ print(detection.result)
139
+ print(detection.logprobs)
140
+ # Output: True + logprobs
141
+
142
+ # Example: Translation
143
+ translation = the_tool.translate("سلام، حالت چطوره؟", target_language="English", with_analysis=True)
144
+ print(translation.result)
145
+ print(translation.analysis)
146
+ # Output: "Hi! How are you?" + analysis
147
+ ```
148
+
149
+ ---
150
+
151
+ ## ⚡ Quick Start (Async)
152
+
153
+ ```python
154
+ import asyncio
155
+ from openai import AsyncOpenAI
156
+ from texttools import AsyncTheTool
157
+
158
+ async def main():
159
+ # Create your AsyncOpenAI client
160
+ async_client = AsyncOpenAI(base_url="your_url", api_key="your_api_key")
161
+
162
+ # Specify the model
163
+ model = "gpt-4o-mini"
164
+
165
+ # Create an instance of AsyncTheTool
166
+ async_the_tool = AsyncTheTool(client=async_client, model=model)
167
+
168
+ # Example: Async Translation and Keyword Extraction
169
+ translation_task = async_the_tool.translate("سلام، حالت چطوره؟", target_language="English")
170
+ keywords_task = async_the_tool.extract_keywords("Tomorrow, we will be dead by the car crash")
171
+
172
+ (translation, keywords) = await asyncio.gather(translation_task, keywords_task)
173
+ print(translation.result)
174
+ print(keywords.result)
175
+
176
+ asyncio.run(main())
177
+ ```
178
+
179
+ ---
180
+
181
+ ## 👍 Use Cases
182
+
183
+ Use **TextTools** when you need to:
184
+
185
+ - 🔍 **Classify** large datasets quickly without model training
186
+ - 🌍 **Translate** and process multilingual corpora with ease
187
+ - 🧩 **Integrate** LLMs into production pipelines (structured outputs)
188
+ - 📊 **Analyze** large text collections using embeddings and categorization
189
+
190
+ ---
191
+
192
+ ## 🔍 Logging
193
+
194
+ TextTools uses Python's standard `logging` module. The library's default logger level is `WARNING`, so if you want to modify it, follow instructions:
195
+
196
+
197
+ ```python
198
+ import logging
199
+
200
+ # Default: warnings and errors only
201
+ logging.basicConfig(level=logging.WARNING)
202
+
203
+ # Debug everything (verbose)
204
+ logging.basicConfig(level=logging.DEBUG)
205
+
206
+ # Complete silence
207
+ logging.basicConfig(level=logging.CRITICAL)
208
+ ```
209
+
210
+ ---
211
+
212
+ ## 📚 Batch Processing
213
+
214
+ Process large datasets efficiently using OpenAI's batch API.
215
+
216
+ ## ⚡ Quick Start (Batch)
217
+
218
+ ```python
219
+ from pydantic import BaseModel
220
+ from texttools import BatchJobRunner, BatchConfig
221
+
222
+ # Configure your batch job
223
+ config = BatchConfig(
224
+ system_prompt="Extract entities from the text",
225
+ job_name="entity_extraction",
226
+ input_data_path="data.json",
227
+ output_data_filename="results.json",
228
+ model="gpt-4o-mini"
229
+ )
230
+
231
+ # Define your output schema
232
+ class Output(BaseModel):
233
+ entities: list[str]
234
+
235
+ # Run the batch job
236
+ runner = BatchJobRunner(config, output_model=Output)
237
+ runner.run()
238
+ ```
239
+
240
+ ---
241
+
242
+ ## 🤝 Contributing
243
+
244
+ Contributions are welcome!
245
+ Feel free to **open issues, suggest new features, or submit pull requests**.
246
+
247
+ ---
248
+
249
+ ## 🌿 License
250
+
251
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.