hamtaa-texttools 1.2.0__py3-none-any.whl → 1.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: hamtaa-texttools
- Version: 1.2.0
+ Version: 1.3.1
  Summary: A high-level NLP toolkit built on top of modern LLMs.
  Author-email: Tohidi <the.mohammad.tohidi@gmail.com>, Erfan Moosavi <erfanmoosavi84@gmail.com>, Montazer <montazerh82@gmail.com>, Givechi <mohamad.m.givechi@gmail.com>, Zareshahi <a.zareshahi1377@gmail.com>
  Maintainer-email: Erfan Moosavi <erfanmoosavi84@gmail.com>, Tohidi <the.mohammad.tohidi@gmail.com>
@@ -73,7 +73,7 @@ pip install -U hamtaa-texttools
 
  ---
 
- ## ⚙️ `with_analysis`, `logprobs`, `output_lang`, `user_prompt`, `temperature`, `validator` and `priority` parameters
+ ## ⚙️ `with_analysis`, `logprobs`, `output_lang`, `user_prompt`, `temperature`, `validator`, `priority` and `timeout` parameters
 
  TextTools provides several optional flags to customize LLM behavior:
 
@@ -94,6 +94,10 @@ TextTools provides several optional flags to customize LLM behavior:
  - **`priority: int (Experimental)`** → Task execution priority level. Affects processing order in queues.
  **Note:** This feature works if it's supported by the model and vLLM.
 
+ - **`timeout: float`** → Maximum time in seconds to wait for the response before raising a timeout error
+ **Note:** This feature only exists in `AsyncTheTool`.
+
+
  ---
 
  ## 🧩 ToolOutput
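
For readers upgrading, here is a minimal sketch of how the new async-only `timeout` parameter might be used. The method name `summarize` and the no-argument constructor are illustrative assumptions, not taken from this diff; only the `timeout` keyword and the builtin `TimeoutError` raised by the engine helper (see the `engine.py` hunk below) are grounded in it.

```python
import asyncio

from texttools import AsyncTheTool


async def main() -> None:
    tool = AsyncTheTool()  # assumed constructor; configure as your setup requires

    try:
        # `timeout` caps the wait for the model's response, in seconds.
        # `summarize` is a hypothetical method name used for illustration.
        result = await tool.summarize("A long document ...", timeout=10.0)
        print(result)
    except TimeoutError:
        # Per the engine helper added in this release, asyncio.TimeoutError
        # is converted into the builtin TimeoutError.
        print("No response within 10 seconds.")


asyncio.run(main())
```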
@@ -173,33 +177,6 @@ Use **TextTools** when you need to:
 
  ---
 
- ## 📚 Batch Processing
-
- Process large datasets efficiently using OpenAI's batch API.
-
- ## ⚡ Quick Start (Batch Runner)
-
- ```python
- from pydantic import BaseModel
- from texttools import BatchRunner, BatchConfig
-
- config = BatchConfig(
-     system_prompt="Extract entities from the text",
-     job_name="entity_extraction",
-     input_data_path="data.json",
-     output_data_filename="results.json",
-     model="gpt-4o-mini"
- )
-
- class Output(BaseModel):
-     entities: list[str]
-
- runner = BatchRunner(config, output_model=Output)
- runner.run()
- ```
-
- ---
-
  ## 🤝 Contributing
 
  Contributions are welcome!
@@ -1,13 +1,9 @@
- hamtaa_texttools-1.2.0.dist-info/licenses/LICENSE,sha256=Hb2YOBKy2MJQLnyLrX37B4ZVuac8eaIcE71SvVIMOLg,1082
- texttools/__init__.py,sha256=4z7wInlrgbGSlWlXHQNeZMCGQH1sN2xtARsbgLHOLd8,283
+ hamtaa_texttools-1.3.1.dist-info/licenses/LICENSE,sha256=Hb2YOBKy2MJQLnyLrX37B4ZVuac8eaIcE71SvVIMOLg,1082
+ texttools/__init__.py,sha256=RK1GAU6pq2lGwFtHdrCX5JkPRHmOLGcmGH67hd_7VAQ,175
  texttools/models.py,sha256=5eT2cSrFq8Xa38kANznV7gbi7lwB2PoDxciLKTpsd6c,2516
  texttools/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- texttools/batch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- texttools/batch/config.py,sha256=GDDXuhRZ_bOGVwSIlU4tWP247tx1_A7qzLJn7VqDyLU,1050
- texttools/batch/manager.py,sha256=XZtf8UkdClfQlnRKne4nWEcFvdSKE67EamEePKy7jwI,8730
- texttools/batch/runner.py,sha256=9qxXIMfYRXW5SXDqqKtRr61rnQdYZkbCGqKImhSrY6I,9923
  texttools/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- texttools/core/engine.py,sha256=WhEtxZjYEbCnD0gExGRR4eSmAY4J05E9csovt2Qqlm8,9281
+ texttools/core/engine.py,sha256=iRHdlIOPuUwIN6_72HNyTQQE7h_7xUZhC-WO-fDA5k8,9597
  texttools/core/exceptions.py,sha256=6SDjUL1rmd3ngzD3ytF4LyTRj3bQMSFR9ECrLoqXXHw,395
  texttools/core/internal_models.py,sha256=aExdLvhXhSev8NY1kuAJckeXdFBEisQtKZPxybd3rW8,1703
  texttools/core/operators/async_operator.py,sha256=wFs7eZ9QJrL0jBOu00YffgfPnIrCSavNjecSorXh-mE,6452
@@ -26,9 +22,9 @@ texttools/prompts/summarize.yaml,sha256=rPh060Bx_yI1W2JNg-nr83LUk9itatYLKM8ciH2p
  texttools/prompts/text_to_question.yaml,sha256=pUwPgK9l5f8S4E5fCht9JY7PFVK2aY1InPfASr7R5o4,1017
  texttools/prompts/translate.yaml,sha256=Dd5bs3O8SI-FlVSwHMYGeEjMmdOWeRlcfBHkhixCx7c,665
  texttools/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- texttools/tools/async_tools.py,sha256=DonASaaOPbWp7Gh1UY4RlP3yPoYTuhJtVmLns8KYupE,42949
- texttools/tools/sync_tools.py,sha256=y4nMlabgvRapb-YFoiGA5-5HflKrRHttiWSHpkg9tug,42742
- hamtaa_texttools-1.2.0.dist-info/METADATA,sha256=vN4XmIWdH6mdAGfgSkjRdQLEoFNzbhhH32jOyEv9H6w,7846
- hamtaa_texttools-1.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- hamtaa_texttools-1.2.0.dist-info/top_level.txt,sha256=5Mh0jIxxZ5rOXHGJ6Mp-JPKviywwN0MYuH0xk5bEWqE,10
- hamtaa_texttools-1.2.0.dist-info/RECORD,,
+ texttools/tools/async_tools.py,sha256=2suwx8N0aRnowaSOpV6C57AqPlmQe5Z0Yx4E5QIMkmU,46939
+ texttools/tools/sync_tools.py,sha256=mEuL-nlbxVW30dPE3hGkAUnYXbul-3gN2Le4CMVFCgU,42528
+ hamtaa_texttools-1.3.1.dist-info/METADATA,sha256=6wLYAaPVOFpzUz8tN7lfzbAGhEr10JBXgRHcZZvrt5s,7453
+ hamtaa_texttools-1.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ hamtaa_texttools-1.3.1.dist-info/top_level.txt,sha256=5Mh0jIxxZ5rOXHGJ6Mp-JPKviywwN0MYuH0xk5bEWqE,10
+ hamtaa_texttools-1.3.1.dist-info/RECORD,,
texttools/__init__.py CHANGED
@@ -1,7 +1,5 @@
- from .batch.config import BatchConfig
- from .batch.runner import BatchRunner
  from .models import CategoryTree
  from .tools.async_tools import AsyncTheTool
  from .tools.sync_tools import TheTool
 
- __all__ = ["TheTool", "AsyncTheTool", "CategoryTree", "BatchRunner", "BatchConfig"]
+ __all__ = ["TheTool", "AsyncTheTool", "CategoryTree"]
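
The `texttools/batch/` modules are also dropped from the wheel entirely (see the RECORD diff above), so 1.2.0-style batch imports now fail at import time. A minimal upgrade check, assuming nothing beyond what this diff shows:

```python
# The 1.3.1 public API keeps only these three names in __all__.
from texttools import AsyncTheTool, CategoryTree, TheTool  # noqa: F401

try:
    # Exported in 1.2.0, removed in 1.3.1 along with texttools/batch/.
    from texttools import BatchConfig, BatchRunner  # noqa: F401
except ImportError:
    print("Batch processing (BatchRunner/BatchConfig) is not available in 1.3.1.")
```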
texttools/core/engine.py CHANGED
@@ -1,3 +1,4 @@
+ import asyncio
  import math
  import random
  import re
@@ -252,3 +253,12 @@ def text_to_chunks(text: str, size: int, overlap: int) -> list[str]:
          return final_chunks
 
      return _split_text(text, separators)
+
+
+ async def run_with_timeout(coro, timeout: float | None):
+     if timeout is None:
+         return await coro
+     try:
+         return await asyncio.wait_for(coro, timeout=timeout)
+     except asyncio.TimeoutError:
+         raise TimeoutError(f"Operation exceeded timeout of {timeout} seconds")
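
Taken together with the `timeout` parameter documented above, the new helper can be exercised directly. A minimal sketch, assuming `run_with_timeout` remains importable from `texttools.core.engine` as the hunk shows; the sleeping coroutine is a stand-in for a slow LLM call:

```python
import asyncio

from texttools.core.engine import run_with_timeout


async def slow_call() -> str:
    await asyncio.sleep(5)  # stand-in for a long-running model request
    return "done"


async def main() -> None:
    # timeout=None awaits the coroutine directly, with no deadline applied.
    print(await run_with_timeout(slow_call(), timeout=None))

    try:
        # A finite timeout goes through asyncio.wait_for; on expiry the helper
        # re-raises as the builtin TimeoutError with a descriptive message.
        await run_with_timeout(slow_call(), timeout=1.0)
    except TimeoutError as exc:
        print(exc)  # Operation exceeded timeout of 1.0 seconds


asyncio.run(main())
```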