hamtaa-texttools 1.0.5__py3-none-any.whl → 1.1.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. hamtaa_texttools-1.1.16.dist-info/METADATA +255 -0
  2. hamtaa_texttools-1.1.16.dist-info/RECORD +31 -0
  3. texttools/__init__.py +6 -8
  4. texttools/batch/batch_config.py +26 -0
  5. texttools/batch/batch_runner.py +144 -139
  6. texttools/batch/{batch_manager.py → internals/batch_manager.py} +42 -54
  7. texttools/batch/internals/utils.py +16 -0
  8. texttools/prompts/README.md +8 -4
  9. texttools/prompts/categorize.yaml +77 -0
  10. texttools/prompts/detect_entity.yaml +22 -0
  11. texttools/prompts/extract_keywords.yaml +68 -0
  12. texttools/prompts/{question_merger.yaml → merge_questions.yaml} +5 -5
  13. texttools/tools/async_tools.py +804 -0
  14. texttools/tools/internals/async_operator.py +139 -236
  15. texttools/tools/internals/formatters.py +24 -0
  16. texttools/tools/internals/models.py +183 -0
  17. texttools/tools/internals/operator_utils.py +54 -0
  18. texttools/tools/internals/prompt_loader.py +23 -43
  19. texttools/tools/internals/sync_operator.py +201 -0
  20. texttools/tools/sync_tools.py +804 -0
  21. hamtaa_texttools-1.0.5.dist-info/METADATA +0 -192
  22. hamtaa_texttools-1.0.5.dist-info/RECORD +0 -30
  23. texttools/batch/__init__.py +0 -4
  24. texttools/formatters/base_formatter.py +0 -33
  25. texttools/formatters/user_merge_formatter.py +0 -30
  26. texttools/prompts/categorizer.yaml +0 -28
  27. texttools/prompts/keyword_extractor.yaml +0 -18
  28. texttools/tools/__init__.py +0 -4
  29. texttools/tools/async_the_tool.py +0 -277
  30. texttools/tools/internals/operator.py +0 -295
  31. texttools/tools/internals/output_models.py +0 -52
  32. texttools/tools/the_tool.py +0 -501
  33. {hamtaa_texttools-1.0.5.dist-info → hamtaa_texttools-1.1.16.dist-info}/WHEEL +0 -0
  34. {hamtaa_texttools-1.0.5.dist-info → hamtaa_texttools-1.1.16.dist-info}/licenses/LICENSE +0 -0
  35. {hamtaa_texttools-1.0.5.dist-info → hamtaa_texttools-1.1.16.dist-info}/top_level.txt +0 -0
  36. /texttools/prompts/{ner_extractor.yaml → extract_entities.yaml} +0 -0
  37. /texttools/prompts/{question_detector.yaml → is_question.yaml} +0 -0
  38. /texttools/prompts/{rewriter.yaml → rewrite.yaml} +0 -0
  39. /texttools/prompts/{custom_tool.yaml → run_custom.yaml} +0 -0
  40. /texttools/prompts/{subject_question_generator.yaml → subject_to_question.yaml} +0 -0
  41. /texttools/prompts/{summarizer.yaml → summarize.yaml} +0 -0
  42. /texttools/prompts/{question_generator.yaml → text_to_question.yaml} +0 -0
  43. /texttools/prompts/{translator.yaml → translate.yaml} +0 -0
@@ -0,0 +1,255 @@
1
+ Metadata-Version: 2.4
2
+ Name: hamtaa-texttools
3
+ Version: 1.1.16
4
+ Summary: A high-level NLP toolkit built on top of modern LLMs.
5
+ Author-email: Tohidi <the.mohammad.tohidi@gmail.com>, Montazer <montazerh82@gmail.com>, Givechi <mohamad.m.givechi@gmail.com>, MoosaviNejad <erfanmoosavi84@gmail.com>
6
+ License: MIT License
7
+
8
+ Copyright (c) 2025 Hamtaa
9
+
10
+ Permission is hereby granted, free of charge, to any person obtaining a copy
11
+ of this software and associated documentation files (the "Software"), to deal
12
+ in the Software without restriction, including without limitation the rights
13
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14
+ copies of the Software, and to permit persons to whom the Software is
15
+ furnished to do so, subject to the following conditions:
16
+
17
+ The above copyright notice and this permission notice shall be included in all
18
+ copies or substantial portions of the Software.
19
+
20
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
23
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26
+ SOFTWARE.
27
+ Keywords: nlp,llm,text-processing,openai
28
+ Requires-Python: >=3.8
29
+ Description-Content-Type: text/markdown
30
+ License-File: LICENSE
31
+ Requires-Dist: openai==1.97.1
32
+ Requires-Dist: pydantic>=2.0.0
33
+ Requires-Dist: pyyaml>=6.0
34
+ Dynamic: license-file
35
+
36
+ # TextTools
37
+
38
+ ## 📌 Overview
39
+
40
+ **TextTools** is a high-level **NLP toolkit** built on top of modern **LLMs**.
41
+
42
+ It provides both **sync (`TheTool`)** and **async (`AsyncTheTool`)** APIs for maximum flexibility.
43
+
44
+ It provides ready-to-use utilities for **translation, question detection, keyword extraction, categorization, NER extraction, and more** - designed to help you integrate AI-powered text processing into your applications with minimal effort.
45
+
46
+ ---
47
+
48
+ ## ✨ Features
49
+
50
+ TextTools provides a rich collection of high-level NLP utilities.
51
+ Each tool is designed to work with structured outputs (JSON / Pydantic).
52
+
53
+ - **`categorize()`** - Classifies text into given categories (You have to create a category tree)
54
+ - **`extract_keywords()`** - Extracts keywords from text
55
+ - **`extract_entities()`** - Named Entity Recognition (NER) system
56
+ - **`is_question()`** - Binary detection of whether input is a question
57
+ - **`text_to_question()`** - Generates questions from text
58
+ - **`merge_questions()`** - Merges multiple questions with different modes
59
+ - **`rewrite()`** - Rewrites text with different wording/meaning
60
+ - **`subject_to_question()`** - Generates questions about a specific subject
61
+ - **`summarize()`** - Text summarization
62
+ - **`translate()`** - Text translation between languages
63
+ - **`run_custom()`** - Allows users to define a custom tool with an arbitrary BaseModel
64
+
65
+ ---
66
+
67
+ ## ⚙️ `with_analysis`, `logprobs`, `output_lang`, `user_prompt`, `temperature`, `validator` and `priority` parameters
68
+
69
+ TextTools provides several optional flags to customize LLM behavior:
70
+
71
+ - **`with_analysis (bool)`** → Adds a reasoning step before generating the final output.
72
+ **Note:** This doubles token usage per call because it triggers an additional LLM request.
73
+
74
+ - **`logprobs (bool)`** → Returns token-level probabilities for the generated output. You can also specify `top_logprobs=<N>` to get the top N alternative tokens and their probabilities.
75
+ **Note:** This feature works if it's supported by the model.
76
+
77
+ - **`output_lang (str)`** → Forces the model to respond in a specific language. The model will ignore other instructions about language and respond strictly in the requested language.
78
+
79
+ - **`user_prompt (str)`** → Allows you to inject a custom instruction or prompt into the model alongside the main template. This gives you fine-grained control over how the model interprets or modifies the input text.
80
+
81
+ - **`temperature (float)`** → Determines how creatively the model should respond. Takes a float number from `0.0` to `2.0`.
82
+
83
+ - **`validator (Callable)`** → Forces TheTool to validate the output result based on your custom validator. The validator should return a bool (`True` if the output is valid, `False` if validation fails). If validation fails, TheTool will retry with a modified `temperature` to get another output. You can specify `max_validation_retries=<N>` to change the number of retries.
84
+
85
+ - **`priority (int)`** → Task execution priority level. Higher values = higher priority. Affects processing order in queues.
86
+ **Note:** This feature works if it's supported by the model and vLLM.
87
+
88
+ **Note:** There might be some tools that don't support some of the parameters above.
89
+
90
+ ---
91
+
92
+ ## 🧩 ToolOutput
93
+
94
+ Every tool of `TextTools` returns a `ToolOutput` object which is a BaseModel with attributes:
95
+ - **`result: Any`** → The output of LLM
96
+ - **`analysis: str`** → The reasoning step before generating the final output
97
+ - **`logprobs: list`** → Token-level probabilities for the generated output
98
+ - **`process: str`** → The tool name which processed the input
99
+ - **`processed_at: datetime`** → The process time
100
+ - **`execution_time: float`** → The execution time (seconds)
101
+ - **`errors: list[str]`** → Any errors that occurred while calling the LLM
102
+
103
+ **Note:** You can use `repr(ToolOutput)` to see details of your ToolOutput.
104
+
105
+ ---
106
+
107
+ ## 🚀 Installation
108
+
109
+ Install the latest release via PyPI:
110
+
111
+ ```bash
112
+ pip install -U hamtaa-texttools
113
+ ```
114
+
115
+ ---
116
+
117
+ ## 🧨 Sync vs Async
118
+ | Tool | Style | Use case |
119
+ |--------------|---------|---------------------------------------------|
120
+ | `TheTool` | Sync | Simple scripts, sequential workflows |
121
+ | `AsyncTheTool` | Async | High-throughput apps, APIs, concurrent tasks |
122
+
123
+ ---
124
+
125
+ ## ⚡ Quick Start (Sync)
126
+
127
+ ```python
128
+ from openai import OpenAI
129
+ from texttools import TheTool
130
+
131
+ # Create your OpenAI client
132
+ client = OpenAI(base_url="your_url", api_key="your_api_key")
133
+
134
+ # Specify the model
135
+ model = "gpt-4o-mini"
136
+
137
+ # Create an instance of TheTool
138
+ the_tool = TheTool(client=client, model=model)
139
+
140
+ # Example: Question Detection
141
+ detection = the_tool.is_question("Is this project open source?", logprobs=True, top_logprobs=2)
142
+ print(detection.result)
143
+ print(detection.logprobs)
144
+ # Output: True + logprobs
145
+
146
+ # Example: Translation
147
+ translation = the_tool.translate("سلام، حالت چطوره؟", target_language="English", with_analysis=True)
148
+ print(translation.result)
149
+ print(translation.analysis)
150
+ # Output: "Hi! How are you?" + analysis
151
+ ```
152
+
153
+ ---
154
+
155
+ ## ⚡ Quick Start (Async)
156
+
157
+ ```python
158
+ import asyncio
159
+ from openai import AsyncOpenAI
160
+ from texttools import AsyncTheTool
161
+
162
+ async def main():
163
+ # Create your AsyncOpenAI client
164
+ async_client = AsyncOpenAI(base_url="your_url", api_key="your_api_key")
165
+
166
+ # Specify the model
167
+ model = "gpt-4o-mini"
168
+
169
+ # Create an instance of AsyncTheTool
170
+ async_the_tool = AsyncTheTool(client=async_client, model=model)
171
+
172
+ # Example: Async Translation and Keyword Extraction
173
+ translation_task = async_the_tool.translate("سلام، حالت چطوره؟", target_language="English")
174
+ keywords_task = async_the_tool.extract_keywords("Tomorrow, we will be dead by the car crash")
175
+
176
+ (translation, keywords) = await asyncio.gather(translation_task, keywords_task)
177
+ print(translation.result)
178
+ print(keywords.result)
179
+
180
+ asyncio.run(main())
181
+ ```
182
+
183
+ ---
184
+
185
+ ## 👍 Use Cases
186
+
187
+ Use **TextTools** when you need to:
188
+
189
+ - 🔍 **Classify** large datasets quickly without model training
190
+ - 🌍 **Translate** and process multilingual corpora with ease
191
+ - 🧩 **Integrate** LLMs into production pipelines (structured outputs)
192
+ - 📊 **Analyze** large text collections using embeddings and categorization
193
+
194
+ ---
195
+
196
+ ## 🔍 Logging
197
+
198
+ TextTools uses Python's standard `logging` module. The library's default logger level is `WARNING`, so if you want to modify it, follow these instructions:
199
+
200
+
201
+ ```python
202
+ import logging
203
+
204
+ # Default: warnings and errors only
205
+ logging.basicConfig(level=logging.WARNING)
206
+
207
+ # Debug everything (verbose)
208
+ logging.basicConfig(level=logging.DEBUG)
209
+
210
+ # Near-silent: only critical errors are shown
211
+ logging.basicConfig(level=logging.CRITICAL)
212
+ ```
213
+
214
+ ---
215
+
216
+ ## 📚 Batch Processing
217
+
218
+ Process large datasets efficiently using OpenAI's batch API.
219
+
220
+ ## ⚡ Quick Start (Batch)
221
+
222
+ ```python
223
+ from pydantic import BaseModel
224
+ from texttools import BatchJobRunner, BatchConfig
225
+
226
+ # Configure your batch job
227
+ config = BatchConfig(
228
+ system_prompt="Extract entities from the text",
229
+ job_name="entity_extraction",
230
+ input_data_path="data.json",
231
+ output_data_filename="results.json",
232
+ model="gpt-4o-mini"
233
+ )
234
+
235
+ # Define your output schema
236
+ class Output(BaseModel):
237
+ entities: list[str]
238
+
239
+ # Run the batch job
240
+ runner = BatchJobRunner(config, output_model=Output)
241
+ runner.run()
242
+ ```
243
+
244
+ ---
245
+
246
+ ## 🤝 Contributing
247
+
248
+ Contributions are welcome!
249
+ Feel free to **open issues, suggest new features, or submit pull requests**.
250
+
251
+ ---
252
+
253
+ ## 🌿 License
254
+
255
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
@@ -0,0 +1,31 @@
1
+ hamtaa_texttools-1.1.16.dist-info/licenses/LICENSE,sha256=Hb2YOBKy2MJQLnyLrX37B4ZVuac8eaIcE71SvVIMOLg,1082
2
+ texttools/__init__.py,sha256=dc81lXGWP29k7oVvq2BMoMotz6lgiwX4PO2jHHBe2S8,317
3
+ texttools/batch/batch_config.py,sha256=m1UgILVKjNdWE6laNbfbG4vgi4o2fEegGZbeoam6pnY,749
4
+ texttools/batch/batch_runner.py,sha256=9e4SPLlvLHHs3U7bHkuuMVw8TFNwsGUzRjkAMKN4_ik,9378
5
+ texttools/batch/internals/batch_manager.py,sha256=UoBe76vmFG72qrSaGKDZf4HzkykFBkkkbL9TLfV8TuQ,8730
6
+ texttools/batch/internals/utils.py,sha256=F1_7YlVFKhjUROAFX4m0SaP8KiZVZyHRMIIB87VUGQc,373
7
+ texttools/prompts/README.md,sha256=-5YO93CN93QLifqZpUeUnCOCBbDiOTV-cFQeJ7Gg0I4,1377
8
+ texttools/prompts/categorize.yaml,sha256=F7VezB25B_sT5yoC25ezODBddkuDD5lUHKetSpx9FKI,2743
9
+ texttools/prompts/detect_entity.yaml,sha256=1rhMkJOjxSQcT4j_c5SRcIm77AUdeG-rUmeidb6VOFc,981
10
+ texttools/prompts/extract_entities.yaml,sha256=KiKjeDpHaeh3JVtZ6q1pa3k4DYucUIU9WnEcRTCA-SE,651
11
+ texttools/prompts/extract_keywords.yaml,sha256=Vj4Tt3vT6LtpOo_iBZPo9oWI50oVdPGXe5i8yDR8ex4,3177
12
+ texttools/prompts/is_question.yaml,sha256=d0-vKRbXWkxvO64ikvxRjEmpAXGpCYIPGhgexvPPjws,471
13
+ texttools/prompts/merge_questions.yaml,sha256=0J85GvTirZB4ELwH3sk8ub_WcqqpYf6PrMKr3djlZeo,1792
14
+ texttools/prompts/rewrite.yaml,sha256=LO7He_IA3MZKz8a-LxH9DHJpOjpYwaYN1pbjp1Y0tFo,5392
15
+ texttools/prompts/run_custom.yaml,sha256=38OkCoVITbuuS9c08UZSP1jZW4WjSmRIi8fR0RAiPu4,108
16
+ texttools/prompts/subject_to_question.yaml,sha256=C7x7rNNm6U_ZG9HOn6zuzYOtvJUZ2skuWbL1-aYdd3E,1147
17
+ texttools/prompts/summarize.yaml,sha256=o6rxGPfWtZd61Duvm8NVvCJqfq73b-wAuMSKR6UYUqY,459
18
+ texttools/prompts/text_to_question.yaml,sha256=UheKYpDn6iyKI8NxunHZtFpNyfCLZZe5cvkuXpurUJY,783
19
+ texttools/prompts/translate.yaml,sha256=mGT2uBCei6uucWqVbs4silk-UV060v3G0jnt0P6sr50,634
20
+ texttools/tools/async_tools.py,sha256=vNAg0gxwUZPsMS4q8JCv7RlYymS8l_5FsFI5adEYT7w,34376
21
+ texttools/tools/sync_tools.py,sha256=hFifFa9YatvSeGif2E_bIG006eMdIBr6SV9HsZ_dAlg,34187
22
+ texttools/tools/internals/async_operator.py,sha256=1TMr8e1qbE9GSz8jl0q3MKdM8lIYE-1ZuSxHjYPqKHI,7198
23
+ texttools/tools/internals/formatters.py,sha256=tACNLP6PeoqaRpNudVxBaHA25zyWqWYPZQuYysIu88g,941
24
+ texttools/tools/internals/models.py,sha256=2QnvMiijuSqOqpCl026848rJy_pHNbRoDESlQvcdHlk,5839
25
+ texttools/tools/internals/operator_utils.py,sha256=w1k0RJ_W_CRbVc_J2w337VuL-opHpHiCxfhEOwtyuOo,1856
26
+ texttools/tools/internals/prompt_loader.py,sha256=4g6-U8kqrGN7VpNaRcrBcnF-h03PXjUDBP0lL0_4EZY,1953
27
+ texttools/tools/internals/sync_operator.py,sha256=4-V__o55Q8w29lWxkhG4St-exZLZTfBbiW76knOXbc0,7106
28
+ hamtaa_texttools-1.1.16.dist-info/METADATA,sha256=DL-cjlGMv7bft8QVd-pn5E_tNDuPgQHkTKGl4YTosGw,9555
29
+ hamtaa_texttools-1.1.16.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
30
+ hamtaa_texttools-1.1.16.dist-info/top_level.txt,sha256=5Mh0jIxxZ5rOXHGJ6Mp-JPKviywwN0MYuH0xk5bEWqE,10
31
+ hamtaa_texttools-1.1.16.dist-info/RECORD,,
texttools/__init__.py CHANGED
@@ -1,9 +1,7 @@
1
- from .batch import BatchJobRunner, SimpleBatchManager
2
- from .tools import AsyncTheTool, TheTool
1
+ from .batch.batch_runner import BatchJobRunner
2
+ from .batch.batch_config import BatchConfig
3
+ from .tools.sync_tools import TheTool
4
+ from .tools.async_tools import AsyncTheTool
5
+ from .tools.internals.models import CategoryTree
3
6
 
4
- __all__ = [
5
- "TheTool",
6
- "AsyncTheTool",
7
- "SimpleBatchManager",
8
- "BatchJobRunner",
9
- ]
7
+ __all__ = ["TheTool", "AsyncTheTool", "BatchJobRunner", "BatchConfig", "CategoryTree"]
@@ -0,0 +1,26 @@
1
+ from dataclasses import dataclass
2
+ from collections.abc import Callable
3
+
4
+ from texttools.batch.internals.utils import import_data, export_data
5
+
6
+
7
+ @dataclass
8
+ class BatchConfig:
9
+ """
10
+ Configuration for batch job runner.
11
+ """
12
+
13
+ system_prompt: str = ""
14
+ job_name: str = ""
15
+ input_data_path: str = ""
16
+ output_data_filename: str = ""
17
+ model: str = "gpt-4.1-mini"
18
+ MAX_BATCH_SIZE: int = 100
19
+ MAX_TOTAL_TOKENS: int = 2_000_000
20
+ CHARS_PER_TOKEN: float = 2.7
21
+ PROMPT_TOKEN_MULTIPLIER: int = 1_000
22
+ BASE_OUTPUT_DIR: str = "Data/batch_entity_result"
23
+ import_function: Callable = import_data
24
+ export_function: Callable = export_data
25
+ poll_interval_seconds: int = 30
26
+ max_retries: int = 3