hamtaa-texttools 1.1.1__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hamtaa_texttools-1.2.0.dist-info/METADATA +212 -0
- hamtaa_texttools-1.2.0.dist-info/RECORD +34 -0
- texttools/__init__.py +6 -8
- texttools/batch/__init__.py +0 -4
- texttools/batch/config.py +40 -0
- texttools/batch/{batch_manager.py → manager.py} +41 -42
- texttools/batch/runner.py +228 -0
- texttools/core/__init__.py +0 -0
- texttools/core/engine.py +254 -0
- texttools/core/exceptions.py +22 -0
- texttools/core/internal_models.py +58 -0
- texttools/core/operators/async_operator.py +194 -0
- texttools/core/operators/sync_operator.py +192 -0
- texttools/models.py +88 -0
- texttools/prompts/categorize.yaml +36 -0
- texttools/prompts/check_fact.yaml +24 -0
- texttools/prompts/extract_entities.yaml +7 -3
- texttools/prompts/extract_keywords.yaml +80 -18
- texttools/prompts/is_question.yaml +6 -2
- texttools/prompts/merge_questions.yaml +12 -5
- texttools/prompts/propositionize.yaml +24 -0
- texttools/prompts/rewrite.yaml +9 -10
- texttools/prompts/run_custom.yaml +2 -2
- texttools/prompts/subject_to_question.yaml +7 -3
- texttools/prompts/summarize.yaml +6 -2
- texttools/prompts/text_to_question.yaml +12 -6
- texttools/prompts/translate.yaml +7 -2
- texttools/py.typed +0 -0
- texttools/tools/__init__.py +0 -4
- texttools/tools/async_tools.py +1093 -0
- texttools/tools/sync_tools.py +1092 -0
- hamtaa_texttools-1.1.1.dist-info/METADATA +0 -183
- hamtaa_texttools-1.1.1.dist-info/RECORD +0 -30
- texttools/batch/batch_runner.py +0 -263
- texttools/prompts/README.md +0 -35
- texttools/prompts/categorizer.yaml +0 -28
- texttools/tools/async_the_tool.py +0 -414
- texttools/tools/internals/async_operator.py +0 -179
- texttools/tools/internals/base_operator.py +0 -91
- texttools/tools/internals/formatters.py +0 -24
- texttools/tools/internals/operator.py +0 -179
- texttools/tools/internals/output_models.py +0 -59
- texttools/tools/internals/prompt_loader.py +0 -57
- texttools/tools/the_tool.py +0 -412
- {hamtaa_texttools-1.1.1.dist-info → hamtaa_texttools-1.2.0.dist-info}/WHEEL +0 -0
- {hamtaa_texttools-1.1.1.dist-info → hamtaa_texttools-1.2.0.dist-info}/licenses/LICENSE +0 -0
- {hamtaa_texttools-1.1.1.dist-info → hamtaa_texttools-1.2.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,212 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: hamtaa-texttools
|
|
3
|
+
Version: 1.2.0
|
|
4
|
+
Summary: A high-level NLP toolkit built on top of modern LLMs.
|
|
5
|
+
Author-email: Tohidi <the.mohammad.tohidi@gmail.com>, Erfan Moosavi <erfanmoosavi84@gmail.com>, Montazer <montazerh82@gmail.com>, Givechi <mohamad.m.givechi@gmail.com>, Zareshahi <a.zareshahi1377@gmail.com>
|
|
6
|
+
Maintainer-email: Erfan Moosavi <erfanmoosavi84@gmail.com>, Tohidi <the.mohammad.tohidi@gmail.com>
|
|
7
|
+
License: MIT
|
|
8
|
+
Keywords: nlp,llm,text-processing,openai
|
|
9
|
+
Classifier: Development Status :: 5 - Production/Stable
|
|
10
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
11
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
12
|
+
Classifier: Topic :: Text Processing
|
|
13
|
+
Classifier: Operating System :: OS Independent
|
|
14
|
+
Requires-Python: >=3.9
|
|
15
|
+
Description-Content-Type: text/markdown
|
|
16
|
+
License-File: LICENSE
|
|
17
|
+
Requires-Dist: openai>=1.97.1
|
|
18
|
+
Requires-Dist: pydantic>=2.0.0
|
|
19
|
+
Requires-Dist: pyyaml>=6.0
|
|
20
|
+
Dynamic: license-file
|
|
21
|
+
|
|
22
|
+
# TextTools
|
|
23
|
+
|
|
24
|
+
## 📌 Overview
|
|
25
|
+
|
|
26
|
+
**TextTools** is a high-level **NLP toolkit** built on top of **LLMs**.
|
|
27
|
+
|
|
28
|
+
It provides both **sync (`TheTool`)** and **async (`AsyncTheTool`)** APIs for maximum flexibility.
|
|
29
|
+
|
|
30
|
+
It provides ready-to-use utilities for **translation, question detection, keyword extraction, categorization, NER extraction, and more** - designed to help you integrate AI-powered text processing into your applications with minimal effort.
|
|
31
|
+
|
|
32
|
+
**Note:** Most features of `texttools` are reliable when you use `google/gemma-3n-e4b-it` model.
|
|
33
|
+
|
|
34
|
+
---
|
|
35
|
+
|
|
36
|
+
## ✨ Features
|
|
37
|
+
|
|
38
|
+
TextTools provides a rich collection of high-level NLP utilities.
|
|
39
|
+
Each tool is designed to work with structured outputs.
|
|
40
|
+
|
|
41
|
+
- **`categorize()`** - Classifies text into given categories
|
|
42
|
+
- **`extract_keywords()`** - Extracts keywords from the text
|
|
43
|
+
- **`extract_entities()`** - Named Entity Recognition (NER) system
|
|
44
|
+
- **`is_question()`** - Binary question detection
|
|
45
|
+
- **`text_to_question()`** - Generates questions from text
|
|
46
|
+
- **`merge_questions()`** - Merges multiple questions into one
|
|
47
|
+
- **`rewrite()`** - Rewrites text in a different way
|
|
48
|
+
- **`subject_to_question()`** - Generates questions about a specific subject
|
|
49
|
+
- **`summarize()`** - Text summarization
|
|
50
|
+
- **`translate()`** - Text translation
|
|
51
|
+
- **`propositionize()`** - Converts text into atomic, independent, meaningful sentences
|
|
52
|
+
- **`check_fact()`** - Check whether a statement is relevant to the source text
|
|
53
|
+
- **`run_custom()`** - Allows users to define a custom tool with an arbitrary BaseModel
|
|
54
|
+
|
|
55
|
+
---
|
|
56
|
+
|
|
57
|
+
## 🚀 Installation
|
|
58
|
+
|
|
59
|
+
Install the latest release via PyPI:
|
|
60
|
+
|
|
61
|
+
```bash
|
|
62
|
+
pip install -U hamtaa-texttools
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
---
|
|
66
|
+
|
|
67
|
+
## 📊 Tool Quality Tiers
|
|
68
|
+
|
|
69
|
+
| Status | Meaning | Tools | Use in Production? |
|
|
70
|
+
|--------|---------|----------|-------------------|
|
|
71
|
+
| **✅ Production** | Evaluated, tested, stable. | `categorize()` (list mode), `extract_keywords()`, `extract_entities()`, `is_question()`, `text_to_question()`, `merge_questions()`, `rewrite()`, `subject_to_question()`, `summarize()`, `run_custom()` | **Yes** - ready for reliable use. |
|
|
72
|
+
| **🧪 Experimental** | Added to the package but **not fully evaluated**. Functional, but quality may vary. | `categorize()` (tree mode), `translate()`, `propositionize()`, `check_fact()` | **Use with caution** - outputs not yet validated. |
|
|
73
|
+
|
|
74
|
+
---
|
|
75
|
+
|
|
76
|
+
## ⚙️ `with_analysis`, `logprobs`, `output_lang`, `user_prompt`, `temperature`, `validator` and `priority` parameters
|
|
77
|
+
|
|
78
|
+
TextTools provides several optional flags to customize LLM behavior:
|
|
79
|
+
|
|
80
|
+
- **`with_analysis: bool`** → Adds a reasoning step before generating the final output.
|
|
81
|
+
**Note:** This doubles token usage per call.
|
|
82
|
+
|
|
83
|
+
- **`logprobs: bool`** → Returns token-level probabilities for the generated output. You can also specify `top_logprobs=<N>` to get the top N alternative tokens and their probabilities.
|
|
84
|
+
**Note:** This feature works if it's supported by the model.
|
|
85
|
+
|
|
86
|
+
- **`output_lang: str`** → Forces the model to respond in a specific language.
|
|
87
|
+
|
|
88
|
+
- **`user_prompt: str`** → Allows you to inject a custom instruction into the model alongside the main template. This gives you fine-grained control over how the model interprets or modifies the input text.
|
|
89
|
+
|
|
90
|
+
- **`temperature: float`** → Determines how creatively the model should respond. Takes a float number from `0.0` to `2.0`.
|
|
91
|
+
|
|
92
|
+
- **`validator: Callable (Experimental)`** → Forces TheTool to validate the output result based on your custom validator. Validator should return a boolean. If the validator fails, TheTool will retry to get another output by modifying `temperature`. You can also specify `max_validation_retries=<N>`.
|
|
93
|
+
|
|
94
|
+
- **`priority: int (Experimental)`** → Task execution priority level. Affects processing order in queues.
|
|
95
|
+
**Note:** This feature works if it's supported by the model and vLLM.
|
|
96
|
+
|
|
97
|
+
---
|
|
98
|
+
|
|
99
|
+
## 🧩 ToolOutput
|
|
100
|
+
|
|
101
|
+
Every tool of `TextTools` returns a `ToolOutput` object which is a BaseModel with attributes:
|
|
102
|
+
- **`result: Any`**
|
|
103
|
+
- **`analysis: str`**
|
|
104
|
+
- **`logprobs: list`**
|
|
105
|
+
- **`errors: list[str]`**
|
|
106
|
+
- **`ToolOutputMetadata`** →
|
|
107
|
+
- **`tool_name: str`**
|
|
108
|
+
- **`processed_at: datetime`**
|
|
109
|
+
- **`execution_time: float`**
|
|
110
|
+
|
|
111
|
+
**Note:** You can use `repr(ToolOutput)` to print your output with all the details.
|
|
112
|
+
|
|
113
|
+
---
|
|
114
|
+
|
|
115
|
+
## 🧨 Sync vs Async
|
|
116
|
+
| Tool | Style | Use case |
|
|
117
|
+
|--------------|---------|---------------------------------------------|
|
|
118
|
+
| `TheTool` | Sync | Simple scripts, sequential workflows |
|
|
119
|
+
| `AsyncTheTool` | Async | High-throughput apps, APIs, concurrent tasks |
|
|
120
|
+
|
|
121
|
+
---
|
|
122
|
+
|
|
123
|
+
## ⚡ Quick Start (Sync)
|
|
124
|
+
|
|
125
|
+
```python
|
|
126
|
+
from openai import OpenAI
|
|
127
|
+
from texttools import TheTool
|
|
128
|
+
|
|
129
|
+
client = OpenAI(base_url="your_url", api_key="your_api_key")
|
|
130
|
+
model = "model_name"
|
|
131
|
+
|
|
132
|
+
the_tool = TheTool(client=client, model=model)
|
|
133
|
+
|
|
134
|
+
detection = the_tool.is_question("Is this project open source?")
|
|
135
|
+
print(repr(detection))
|
|
136
|
+
```
|
|
137
|
+
|
|
138
|
+
---
|
|
139
|
+
|
|
140
|
+
## ⚡ Quick Start (Async)
|
|
141
|
+
|
|
142
|
+
```python
|
|
143
|
+
import asyncio
|
|
144
|
+
from openai import AsyncOpenAI
|
|
145
|
+
from texttools import AsyncTheTool
|
|
146
|
+
|
|
147
|
+
async def main():
|
|
148
|
+
async_client = AsyncOpenAI(base_url="your_url", api_key="your_api_key")
|
|
149
|
+
model = "model_name"
|
|
150
|
+
|
|
151
|
+
async_the_tool = AsyncTheTool(client=async_client, model=model)
|
|
152
|
+
|
|
153
|
+
translation_task = async_the_tool.translate("سلام، حالت چطوره؟", target_language="English")
|
|
154
|
+
keywords_task = async_the_tool.extract_keywords("Tomorrow, we will be dead by the car crash")
|
|
155
|
+
|
|
156
|
+
(translation, keywords) = await asyncio.gather(translation_task, keywords_task)
|
|
157
|
+
print(repr(translation))
|
|
158
|
+
print(repr(keywords))
|
|
159
|
+
|
|
160
|
+
asyncio.run(main())
|
|
161
|
+
```
|
|
162
|
+
|
|
163
|
+
---
|
|
164
|
+
|
|
165
|
+
## 👍 Use Cases
|
|
166
|
+
|
|
167
|
+
Use **TextTools** when you need to:
|
|
168
|
+
|
|
169
|
+
- 🔍 **Classify** large datasets quickly without model training
|
|
170
|
+
- 🌍 **Translate** and process multilingual corpora with ease
|
|
171
|
+
- 🧩 **Integrate** LLMs into production pipelines (structured outputs)
|
|
172
|
+
- 📊 **Analyze** large text collections using embeddings and categorization
|
|
173
|
+
|
|
174
|
+
---
|
|
175
|
+
|
|
176
|
+
## 📚 Batch Processing
|
|
177
|
+
|
|
178
|
+
Process large datasets efficiently using OpenAI's batch API.
|
|
179
|
+
|
|
180
|
+
## ⚡ Quick Start (Batch Runner)
|
|
181
|
+
|
|
182
|
+
```python
|
|
183
|
+
from pydantic import BaseModel
|
|
184
|
+
from texttools import BatchRunner, BatchConfig
|
|
185
|
+
|
|
186
|
+
config = BatchConfig(
|
|
187
|
+
system_prompt="Extract entities from the text",
|
|
188
|
+
job_name="entity_extraction",
|
|
189
|
+
input_data_path="data.json",
|
|
190
|
+
output_data_filename="results.json",
|
|
191
|
+
model="gpt-4o-mini"
|
|
192
|
+
)
|
|
193
|
+
|
|
194
|
+
class Output(BaseModel):
|
|
195
|
+
entities: list[str]
|
|
196
|
+
|
|
197
|
+
runner = BatchRunner(config, output_model=Output)
|
|
198
|
+
runner.run()
|
|
199
|
+
```
|
|
200
|
+
|
|
201
|
+
---
|
|
202
|
+
|
|
203
|
+
## 🤝 Contributing
|
|
204
|
+
|
|
205
|
+
Contributions are welcome!
|
|
206
|
+
Feel free to **open issues, suggest new features, or submit pull requests**.
|
|
207
|
+
|
|
208
|
+
---
|
|
209
|
+
|
|
210
|
+
## 🌿 License
|
|
211
|
+
|
|
212
|
+
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
hamtaa_texttools-1.2.0.dist-info/licenses/LICENSE,sha256=Hb2YOBKy2MJQLnyLrX37B4ZVuac8eaIcE71SvVIMOLg,1082
|
|
2
|
+
texttools/__init__.py,sha256=4z7wInlrgbGSlWlXHQNeZMCGQH1sN2xtARsbgLHOLd8,283
|
|
3
|
+
texttools/models.py,sha256=5eT2cSrFq8Xa38kANznV7gbi7lwB2PoDxciLKTpsd6c,2516
|
|
4
|
+
texttools/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
5
|
+
texttools/batch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
6
|
+
texttools/batch/config.py,sha256=GDDXuhRZ_bOGVwSIlU4tWP247tx1_A7qzLJn7VqDyLU,1050
|
|
7
|
+
texttools/batch/manager.py,sha256=XZtf8UkdClfQlnRKne4nWEcFvdSKE67EamEePKy7jwI,8730
|
|
8
|
+
texttools/batch/runner.py,sha256=9qxXIMfYRXW5SXDqqKtRr61rnQdYZkbCGqKImhSrY6I,9923
|
|
9
|
+
texttools/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
10
|
+
texttools/core/engine.py,sha256=WhEtxZjYEbCnD0gExGRR4eSmAY4J05E9csovt2Qqlm8,9281
|
|
11
|
+
texttools/core/exceptions.py,sha256=6SDjUL1rmd3ngzD3ytF4LyTRj3bQMSFR9ECrLoqXXHw,395
|
|
12
|
+
texttools/core/internal_models.py,sha256=aExdLvhXhSev8NY1kuAJckeXdFBEisQtKZPxybd3rW8,1703
|
|
13
|
+
texttools/core/operators/async_operator.py,sha256=wFs7eZ9QJrL0jBOu00YffgfPnIrCSavNjecSorXh-mE,6452
|
|
14
|
+
texttools/core/operators/sync_operator.py,sha256=NaUS-aLh3y0QNMiKut4qtcSZKYXbuPbw0o2jvPsYKdY,6357
|
|
15
|
+
texttools/prompts/categorize.yaml,sha256=42Rp3SgVHaDLKrJ27_uK788LiQud0pOXJthz4r0a40Y,1214
|
|
16
|
+
texttools/prompts/check_fact.yaml,sha256=zWFQDRhEE1ij9wSeeenS9YSTM-bY5zzUaG390zUgmcs,714
|
|
17
|
+
texttools/prompts/extract_entities.yaml,sha256=_zYKHNJDIzVDI_-TnwFCKyMs-XLM5igvmWhvSTc3INQ,637
|
|
18
|
+
texttools/prompts/extract_keywords.yaml,sha256=1o4u3uwzapNtB1BUpNIRL5qtrwjW0Yhvyq0TZJiafdg,3272
|
|
19
|
+
texttools/prompts/is_question.yaml,sha256=jnPARd2ZiulLzHW_r4WAsz3sOryfz6Gy5-yYXp-2hd0,496
|
|
20
|
+
texttools/prompts/merge_questions.yaml,sha256=l9Q2OEjPp3SDkxbq3zZCj2ZmXacWSnmYMpUr3l6r5yE,1816
|
|
21
|
+
texttools/prompts/propositionize.yaml,sha256=nbGAfbm1-2Hoc0JLtqZi-S7VHQfnMmuTKI7dZeBxQW0,1403
|
|
22
|
+
texttools/prompts/rewrite.yaml,sha256=klEm8MqXK-Bo8RsS5R9KLMT0zlD-BKo_G6tz9lpAcEY,5420
|
|
23
|
+
texttools/prompts/run_custom.yaml,sha256=IETY9H0wPGWIIzcnupfbwwKQblwZrbYAxB754W9MhgU,125
|
|
24
|
+
texttools/prompts/subject_to_question.yaml,sha256=AK16pZW9HUppIF8JBSEenbUNOU3aqeVV781_WUXnLqk,1160
|
|
25
|
+
texttools/prompts/summarize.yaml,sha256=rPh060Bx_yI1W2JNg-nr83LUk9itatYLKM8ciH2pOvg,486
|
|
26
|
+
texttools/prompts/text_to_question.yaml,sha256=pUwPgK9l5f8S4E5fCht9JY7PFVK2aY1InPfASr7R5o4,1017
|
|
27
|
+
texttools/prompts/translate.yaml,sha256=Dd5bs3O8SI-FlVSwHMYGeEjMmdOWeRlcfBHkhixCx7c,665
|
|
28
|
+
texttools/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
29
|
+
texttools/tools/async_tools.py,sha256=DonASaaOPbWp7Gh1UY4RlP3yPoYTuhJtVmLns8KYupE,42949
|
|
30
|
+
texttools/tools/sync_tools.py,sha256=y4nMlabgvRapb-YFoiGA5-5HflKrRHttiWSHpkg9tug,42742
|
|
31
|
+
hamtaa_texttools-1.2.0.dist-info/METADATA,sha256=vN4XmIWdH6mdAGfgSkjRdQLEoFNzbhhH32jOyEv9H6w,7846
|
|
32
|
+
hamtaa_texttools-1.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
33
|
+
hamtaa_texttools-1.2.0.dist-info/top_level.txt,sha256=5Mh0jIxxZ5rOXHGJ6Mp-JPKviywwN0MYuH0xk5bEWqE,10
|
|
34
|
+
hamtaa_texttools-1.2.0.dist-info/RECORD,,
|
texttools/__init__.py
CHANGED
|
@@ -1,9 +1,7 @@
|
|
|
1
|
-
from .batch import
|
|
2
|
-
from .
|
|
1
|
+
from .batch.config import BatchConfig
|
|
2
|
+
from .batch.runner import BatchRunner
|
|
3
|
+
from .models import CategoryTree
|
|
4
|
+
from .tools.async_tools import AsyncTheTool
|
|
5
|
+
from .tools.sync_tools import TheTool
|
|
3
6
|
|
|
4
|
-
__all__ = [
|
|
5
|
-
"TheTool",
|
|
6
|
-
"AsyncTheTool",
|
|
7
|
-
"SimpleBatchManager",
|
|
8
|
-
"BatchJobRunner",
|
|
9
|
-
]
|
|
7
|
+
__all__ = ["TheTool", "AsyncTheTool", "CategoryTree", "BatchRunner", "BatchConfig"]
|
texttools/batch/__init__.py
CHANGED
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
from collections.abc import Callable
|
|
2
|
+
from dataclasses import dataclass
|
|
3
|
+
from typing import Any
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def export_data(data) -> list[dict[str, str]]:
|
|
7
|
+
"""
|
|
8
|
+
Produces a structure of the following form from an initial data structure:
|
|
9
|
+
[{"id": str, "text": str},...]
|
|
10
|
+
"""
|
|
11
|
+
return data
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def import_data(data) -> Any:
|
|
15
|
+
"""
|
|
16
|
+
Takes the output and adds and aggregates it to the original structure.
|
|
17
|
+
"""
|
|
18
|
+
return data
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
@dataclass
|
|
22
|
+
class BatchConfig:
|
|
23
|
+
"""
|
|
24
|
+
Configuration for batch job runner.
|
|
25
|
+
"""
|
|
26
|
+
|
|
27
|
+
system_prompt: str = ""
|
|
28
|
+
job_name: str = ""
|
|
29
|
+
input_data_path: str = ""
|
|
30
|
+
output_data_filename: str = ""
|
|
31
|
+
model: str = "gpt-4.1-mini"
|
|
32
|
+
MAX_BATCH_SIZE: int = 100
|
|
33
|
+
MAX_TOTAL_TOKENS: int = 2_000_000
|
|
34
|
+
CHARS_PER_TOKEN: float = 2.7
|
|
35
|
+
PROMPT_TOKEN_MULTIPLIER: int = 1_000
|
|
36
|
+
BASE_OUTPUT_DIR: str = "Data/batch_entity_result"
|
|
37
|
+
import_function: Callable = import_data
|
|
38
|
+
export_function: Callable = export_data
|
|
39
|
+
poll_interval_seconds: int = 30
|
|
40
|
+
max_retries: int = 3
|
|
@@ -1,19 +1,20 @@
|
|
|
1
1
|
import json
|
|
2
|
+
import logging
|
|
2
3
|
import uuid
|
|
3
4
|
from pathlib import Path
|
|
4
|
-
from typing import Any, Type
|
|
5
|
-
import logging
|
|
5
|
+
from typing import Any, Type, TypeVar
|
|
6
6
|
|
|
7
|
-
from pydantic import BaseModel
|
|
8
7
|
from openai import OpenAI
|
|
9
8
|
from openai.lib._pydantic import to_strict_json_schema
|
|
9
|
+
from pydantic import BaseModel
|
|
10
|
+
|
|
11
|
+
# Base Model type for output models
|
|
12
|
+
T = TypeVar("T", bound=BaseModel)
|
|
10
13
|
|
|
11
|
-
|
|
12
|
-
logger = logging.getLogger("batch_runner")
|
|
13
|
-
logger.setLevel(logging.INFO)
|
|
14
|
+
logger = logging.getLogger("texttools.batch_manager")
|
|
14
15
|
|
|
15
16
|
|
|
16
|
-
class
|
|
17
|
+
class BatchManager:
|
|
17
18
|
"""
|
|
18
19
|
Manages batch processing jobs for OpenAI's chat completions with structured outputs.
|
|
19
20
|
|
|
@@ -26,30 +27,29 @@ class SimpleBatchManager:
|
|
|
26
27
|
self,
|
|
27
28
|
client: OpenAI,
|
|
28
29
|
model: str,
|
|
29
|
-
output_model: Type[
|
|
30
|
+
output_model: Type[T],
|
|
30
31
|
prompt_template: str,
|
|
31
|
-
handlers: list[Any] | None = None,
|
|
32
32
|
state_dir: Path = Path(".batch_jobs"),
|
|
33
33
|
custom_json_schema_obj_str: dict | None = None,
|
|
34
34
|
**client_kwargs: Any,
|
|
35
35
|
):
|
|
36
|
-
self.
|
|
37
|
-
self.
|
|
38
|
-
self.
|
|
39
|
-
self.
|
|
40
|
-
self.
|
|
41
|
-
self.
|
|
42
|
-
self.
|
|
43
|
-
self.
|
|
44
|
-
self.
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
36
|
+
self._client = client
|
|
37
|
+
self._model = model
|
|
38
|
+
self._output_model = output_model
|
|
39
|
+
self._prompt_template = prompt_template
|
|
40
|
+
self._state_dir = state_dir
|
|
41
|
+
self._custom_json_schema_obj_str = custom_json_schema_obj_str
|
|
42
|
+
self._client_kwargs = client_kwargs
|
|
43
|
+
self._dict_input = False
|
|
44
|
+
self._state_dir.mkdir(parents=True, exist_ok=True)
|
|
45
|
+
|
|
46
|
+
if custom_json_schema_obj_str and not isinstance(
|
|
47
|
+
custom_json_schema_obj_str, dict
|
|
48
|
+
):
|
|
49
|
+
raise ValueError("Schema should be a dict")
|
|
50
50
|
|
|
51
51
|
def _state_file(self, job_name: str) -> Path:
|
|
52
|
-
return self.
|
|
52
|
+
return self._state_dir / f"{job_name}.json"
|
|
53
53
|
|
|
54
54
|
def _load_state(self, job_name: str) -> list[dict[str, Any]]:
|
|
55
55
|
"""
|
|
@@ -83,17 +83,17 @@ class SimpleBatchManager:
|
|
|
83
83
|
"""
|
|
84
84
|
response_format_config: dict[str, Any]
|
|
85
85
|
|
|
86
|
-
if self.
|
|
86
|
+
if self._custom_json_schema_obj_str:
|
|
87
87
|
response_format_config = {
|
|
88
88
|
"type": "json_schema",
|
|
89
|
-
"json_schema": self.
|
|
89
|
+
"json_schema": self._custom_json_schema_obj_str,
|
|
90
90
|
}
|
|
91
91
|
else:
|
|
92
|
-
raw_schema = to_strict_json_schema(self.
|
|
92
|
+
raw_schema = to_strict_json_schema(self._output_model)
|
|
93
93
|
response_format_config = {
|
|
94
94
|
"type": "json_schema",
|
|
95
95
|
"json_schema": {
|
|
96
|
-
"name": self.
|
|
96
|
+
"name": self._output_model.__name__,
|
|
97
97
|
"schema": raw_schema,
|
|
98
98
|
},
|
|
99
99
|
}
|
|
@@ -105,11 +105,11 @@ class SimpleBatchManager:
|
|
|
105
105
|
"body": {
|
|
106
106
|
"model": self.model,
|
|
107
107
|
"messages": [
|
|
108
|
-
{"role": "system", "content": self.
|
|
108
|
+
{"role": "system", "content": self._prompt_template},
|
|
109
109
|
{"role": "user", "content": text},
|
|
110
110
|
],
|
|
111
111
|
"response_format": response_format_config,
|
|
112
|
-
**self.
|
|
112
|
+
**self._client_kwargs,
|
|
113
113
|
},
|
|
114
114
|
}
|
|
115
115
|
|
|
@@ -127,10 +127,10 @@ class SimpleBatchManager:
|
|
|
127
127
|
|
|
128
128
|
else:
|
|
129
129
|
raise TypeError(
|
|
130
|
-
"The input must be either a list of texts or a dictionary in the form {'id': str, 'text': str}
|
|
130
|
+
"The input must be either a list of texts or a dictionary in the form {'id': str, 'text': str}"
|
|
131
131
|
)
|
|
132
132
|
|
|
133
|
-
file_path = self.
|
|
133
|
+
file_path = self._state_dir / f"batch_{uuid.uuid4().hex}.jsonl"
|
|
134
134
|
with open(file_path, "w", encoding="utf-8") as f:
|
|
135
135
|
for task in tasks:
|
|
136
136
|
f.write(json.dumps(task) + "\n")
|
|
@@ -143,9 +143,10 @@ class SimpleBatchManager:
|
|
|
143
143
|
"""
|
|
144
144
|
if self._load_state(job_name):
|
|
145
145
|
return
|
|
146
|
+
|
|
146
147
|
path = self._prepare_file(payload)
|
|
147
|
-
upload = self.
|
|
148
|
-
job = self.
|
|
148
|
+
upload = self._client.files.create(file=open(path, "rb"), purpose="batch")
|
|
149
|
+
job = self._client.batches.create(
|
|
149
150
|
input_file_id=upload.id,
|
|
150
151
|
endpoint="/v1/chat/completions",
|
|
151
152
|
completion_window="24h",
|
|
@@ -161,7 +162,7 @@ class SimpleBatchManager:
|
|
|
161
162
|
if not job:
|
|
162
163
|
return "completed"
|
|
163
164
|
|
|
164
|
-
info = self.
|
|
165
|
+
info = self._client.batches.retrieve(job["id"])
|
|
165
166
|
job = info.to_dict()
|
|
166
167
|
self._save_state(job_name, [job])
|
|
167
168
|
logger.info("Batch job status: %s", job)
|
|
@@ -179,18 +180,18 @@ class SimpleBatchManager:
|
|
|
179
180
|
return {}
|
|
180
181
|
batch_id = job["id"]
|
|
181
182
|
|
|
182
|
-
info = self.
|
|
183
|
+
info = self._client.batches.retrieve(batch_id)
|
|
183
184
|
out_file_id = info.output_file_id
|
|
184
185
|
if not out_file_id:
|
|
185
186
|
error_file_id = info.error_file_id
|
|
186
187
|
if error_file_id:
|
|
187
188
|
err_content = (
|
|
188
|
-
self.
|
|
189
|
+
self._client.files.content(error_file_id).read().decode("utf-8")
|
|
189
190
|
)
|
|
190
|
-
logger.
|
|
191
|
+
logger.error("Error file content:", err_content)
|
|
191
192
|
return {}
|
|
192
193
|
|
|
193
|
-
content = self.
|
|
194
|
+
content = self._client.files.content(out_file_id).read().decode("utf-8")
|
|
194
195
|
lines = content.splitlines()
|
|
195
196
|
results = {}
|
|
196
197
|
log = []
|
|
@@ -201,7 +202,7 @@ class SimpleBatchManager:
|
|
|
201
202
|
content = result["response"]["body"]["choices"][0]["message"]["content"]
|
|
202
203
|
try:
|
|
203
204
|
parsed_content = json.loads(content)
|
|
204
|
-
model_instance = self.
|
|
205
|
+
model_instance = self._output_model(**parsed_content)
|
|
205
206
|
results[custom_id] = model_instance.model_dump(mode="json")
|
|
206
207
|
except json.JSONDecodeError:
|
|
207
208
|
results[custom_id] = {"error": "Failed to parse content as JSON"}
|
|
@@ -221,8 +222,6 @@ class SimpleBatchManager:
|
|
|
221
222
|
error_d = {custom_id: results[custom_id]}
|
|
222
223
|
log.append(error_d)
|
|
223
224
|
|
|
224
|
-
for handler in self.handlers:
|
|
225
|
-
handler.handle(results)
|
|
226
225
|
if remove_cache:
|
|
227
226
|
self._clear_state(job_name)
|
|
228
227
|
|