hamtaa-texttools 1.1.16__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45) hide show
  1. hamtaa_texttools-1.2.0.dist-info/METADATA +212 -0
  2. hamtaa_texttools-1.2.0.dist-info/RECORD +34 -0
  3. texttools/__init__.py +5 -5
  4. texttools/batch/__init__.py +0 -0
  5. texttools/batch/{batch_config.py → config.py} +16 -2
  6. texttools/batch/{internals/batch_manager.py → manager.py} +2 -2
  7. texttools/batch/{batch_runner.py → runner.py} +80 -69
  8. texttools/core/__init__.py +0 -0
  9. texttools/core/engine.py +254 -0
  10. texttools/core/exceptions.py +22 -0
  11. texttools/core/internal_models.py +58 -0
  12. texttools/core/operators/async_operator.py +194 -0
  13. texttools/core/operators/sync_operator.py +192 -0
  14. texttools/models.py +88 -0
  15. texttools/prompts/categorize.yaml +36 -77
  16. texttools/prompts/check_fact.yaml +24 -0
  17. texttools/prompts/extract_entities.yaml +7 -3
  18. texttools/prompts/extract_keywords.yaml +21 -9
  19. texttools/prompts/is_question.yaml +6 -2
  20. texttools/prompts/merge_questions.yaml +12 -5
  21. texttools/prompts/propositionize.yaml +24 -0
  22. texttools/prompts/rewrite.yaml +9 -10
  23. texttools/prompts/run_custom.yaml +2 -2
  24. texttools/prompts/subject_to_question.yaml +7 -3
  25. texttools/prompts/summarize.yaml +6 -2
  26. texttools/prompts/text_to_question.yaml +12 -6
  27. texttools/prompts/translate.yaml +7 -2
  28. texttools/py.typed +0 -0
  29. texttools/tools/__init__.py +0 -0
  30. texttools/tools/async_tools.py +778 -489
  31. texttools/tools/sync_tools.py +775 -487
  32. hamtaa_texttools-1.1.16.dist-info/METADATA +0 -255
  33. hamtaa_texttools-1.1.16.dist-info/RECORD +0 -31
  34. texttools/batch/internals/utils.py +0 -16
  35. texttools/prompts/README.md +0 -35
  36. texttools/prompts/detect_entity.yaml +0 -22
  37. texttools/tools/internals/async_operator.py +0 -200
  38. texttools/tools/internals/formatters.py +0 -24
  39. texttools/tools/internals/models.py +0 -183
  40. texttools/tools/internals/operator_utils.py +0 -54
  41. texttools/tools/internals/prompt_loader.py +0 -56
  42. texttools/tools/internals/sync_operator.py +0 -201
  43. {hamtaa_texttools-1.1.16.dist-info → hamtaa_texttools-1.2.0.dist-info}/WHEEL +0 -0
  44. {hamtaa_texttools-1.1.16.dist-info → hamtaa_texttools-1.2.0.dist-info}/licenses/LICENSE +0 -0
  45. {hamtaa_texttools-1.1.16.dist-info → hamtaa_texttools-1.2.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,212 @@
1
+ Metadata-Version: 2.4
2
+ Name: hamtaa-texttools
3
+ Version: 1.2.0
4
+ Summary: A high-level NLP toolkit built on top of modern LLMs.
5
+ Author-email: Tohidi <the.mohammad.tohidi@gmail.com>, Erfan Moosavi <erfanmoosavi84@gmail.com>, Montazer <montazerh82@gmail.com>, Givechi <mohamad.m.givechi@gmail.com>, Zareshahi <a.zareshahi1377@gmail.com>
6
+ Maintainer-email: Erfan Moosavi <erfanmoosavi84@gmail.com>, Tohidi <the.mohammad.tohidi@gmail.com>
7
+ License: MIT
8
+ Keywords: nlp,llm,text-processing,openai
9
+ Classifier: Development Status :: 5 - Production/Stable
10
+ Classifier: License :: OSI Approved :: MIT License
11
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
12
+ Classifier: Topic :: Text Processing
13
+ Classifier: Operating System :: OS Independent
14
+ Requires-Python: >=3.9
15
+ Description-Content-Type: text/markdown
16
+ License-File: LICENSE
17
+ Requires-Dist: openai>=1.97.1
18
+ Requires-Dist: pydantic>=2.0.0
19
+ Requires-Dist: pyyaml>=6.0
20
+ Dynamic: license-file
21
+
22
+ # TextTools
23
+
24
+ ## 📌 Overview
25
+
26
+ **TextTools** is a high-level **NLP toolkit** built on top of **LLMs**.
27
+
28
+ It provides both **sync (`TheTool`)** and **async (`AsyncTheTool`)** APIs for maximum flexibility.
29
+
30
+ It provides ready-to-use utilities for **translation, question detection, keyword extraction, categorization, NER extraction, and more** - designed to help you integrate AI-powered text processing into your applications with minimal effort.
31
+
32
+ **Note:** Most features of `texttools` are reliable when you use the `google/gemma-3n-e4b-it` model.
33
+
34
+ ---
35
+
36
+ ## ✨ Features
37
+
38
+ TextTools provides a rich collection of high-level NLP utilities.
39
+ Each tool is designed to work with structured outputs.
40
+
41
+ - **`categorize()`** - Classifies text into given categories
42
+ - **`extract_keywords()`** - Extracts keywords from the text
43
+ - **`extract_entities()`** - Named Entity Recognition (NER) system
44
+ - **`is_question()`** - Binary question detection
45
+ - **`text_to_question()`** - Generates questions from text
46
+ - **`merge_questions()`** - Merges multiple questions into one
47
+ - **`rewrite()`** - Rewrites text in a different way
48
+ - **`subject_to_question()`** - Generates questions about a specific subject
49
+ - **`summarize()`** - Text summarization
50
+ - **`translate()`** - Text translation
51
+ - **`propositionize()`** - Converts text into atomic, independent, meaningful sentences
52
+ - **`check_fact()`** - Check whether a statement is relevant to the source text
53
+ - **`run_custom()`** - Allows users to define a custom tool with an arbitrary BaseModel
54
+
55
+ ---
56
+
57
+ ## 🚀 Installation
58
+
59
+ Install the latest release via PyPI:
60
+
61
+ ```bash
62
+ pip install -U hamtaa-texttools
63
+ ```
64
+
65
+ ---
66
+
67
+ ## 📊 Tool Quality Tiers
68
+
69
+ | Status | Meaning | Tools | Use in Production? |
70
+ |--------|---------|----------|-------------------|
71
+ | **✅ Production** | Evaluated, tested, stable. | `categorize()` (list mode), `extract_keywords()`, `extract_entities()`, `is_question()`, `text_to_question()`, `merge_questions()`, `rewrite()`, `subject_to_question()`, `summarize()`, `run_custom()` | **Yes** - ready for reliable use. |
72
+ | **🧪 Experimental** | Added to the package but **not fully evaluated**. Functional, but quality may vary. | `categorize()` (tree mode), `translate()`, `propositionize()`, `check_fact()` | **Use with caution** - outputs not yet validated. |
73
+
74
+ ---
75
+
76
+ ## ⚙️ `with_analysis`, `logprobs`, `output_lang`, `user_prompt`, `temperature`, `validator` and `priority` parameters
77
+
78
+ TextTools provides several optional flags to customize LLM behavior:
79
+
80
+ - **`with_analysis: bool`** → Adds a reasoning step before generating the final output.
81
+ **Note:** This doubles token usage per call.
82
+
83
+ - **`logprobs: bool`** → Returns token-level probabilities for the generated output. You can also specify `top_logprobs=<N>` to get the top N alternative tokens and their probabilities.
84
+ **Note:** This feature works if it's supported by the model.
85
+
86
+ - **`output_lang: str`** → Forces the model to respond in a specific language.
87
+
88
+ - **`user_prompt: str`** → Allows you to inject a custom instruction into the model alongside the main template. This gives you fine-grained control over how the model interprets or modifies the input text.
89
+
90
+ - **`temperature: float`** → Determines how creatively the model should respond. Takes a float number from `0.0` to `2.0`.
91
+
92
+ - **`validator: Callable (Experimental)`** → Forces TheTool to validate the output result based on your custom validator. The validator should return a boolean. If validation fails, TheTool will retry with a modified `temperature` to get another output. You can also specify `max_validation_retries=<N>`.
93
+
94
+ - **`priority: int (Experimental)`** → Task execution priority level. Affects processing order in queues.
95
+ **Note:** This feature works if it's supported by the model and vLLM.
96
+
97
+ ---
98
+
99
+ ## 🧩 ToolOutput
100
+
101
+ Every tool of `TextTools` returns a `ToolOutput` object which is a BaseModel with attributes:
102
+ - **`result: Any`**
103
+ - **`analysis: str`**
104
+ - **`logprobs: list`**
105
+ - **`errors: list[str]`**
106
+ - **`ToolOutputMetadata`** →
107
+ - **`tool_name: str`**
108
+ - **`processed_at: datetime`**
109
+ - **`execution_time: float`**
110
+
111
+ **Note:** You can use `repr(ToolOutput)` to print your output with all the details.
112
+
113
+ ---
114
+
115
+ ## 🧨 Sync vs Async
116
+ | Tool | Style | Use case |
117
+ |--------------|---------|---------------------------------------------|
118
+ | `TheTool` | Sync | Simple scripts, sequential workflows |
119
+ | `AsyncTheTool` | Async | High-throughput apps, APIs, concurrent tasks |
120
+
121
+ ---
122
+
123
+ ## ⚡ Quick Start (Sync)
124
+
125
+ ```python
126
+ from openai import OpenAI
127
+ from texttools import TheTool
128
+
129
+ client = OpenAI(base_url="your_url", api_key="your_api_key")
130
+ model = "model_name"
131
+
132
+ the_tool = TheTool(client=client, model=model)
133
+
134
+ detection = the_tool.is_question("Is this project open source?")
135
+ print(repr(detection))
136
+ ```
137
+
138
+ ---
139
+
140
+ ## ⚡ Quick Start (Async)
141
+
142
+ ```python
143
+ import asyncio
144
+ from openai import AsyncOpenAI
145
+ from texttools import AsyncTheTool
146
+
147
+ async def main():
148
+ async_client = AsyncOpenAI(base_url="your_url", api_key="your_api_key")
149
+ model = "model_name"
150
+
151
+ async_the_tool = AsyncTheTool(client=async_client, model=model)
152
+
153
+ translation_task = async_the_tool.translate("سلام، حالت چطوره؟", target_language="English")
154
+ keywords_task = async_the_tool.extract_keywords("Tomorrow, we will be dead by the car crash")
155
+
156
+ (translation, keywords) = await asyncio.gather(translation_task, keywords_task)
157
+ print(repr(translation))
158
+ print(repr(keywords))
159
+
160
+ asyncio.run(main())
161
+ ```
162
+
163
+ ---
164
+
165
+ ## 👍 Use Cases
166
+
167
+ Use **TextTools** when you need to:
168
+
169
+ - 🔍 **Classify** large datasets quickly without model training
170
+ - 🌍 **Translate** and process multilingual corpora with ease
171
+ - 🧩 **Integrate** LLMs into production pipelines (structured outputs)
172
+ - 📊 **Analyze** large text collections using embeddings and categorization
173
+
174
+ ---
175
+
176
+ ## 📚 Batch Processing
177
+
178
+ Process large datasets efficiently using OpenAI's batch API.
179
+
180
+ ## ⚡ Quick Start (Batch Runner)
181
+
182
+ ```python
183
+ from pydantic import BaseModel
184
+ from texttools import BatchRunner, BatchConfig
185
+
186
+ config = BatchConfig(
187
+ system_prompt="Extract entities from the text",
188
+ job_name="entity_extraction",
189
+ input_data_path="data.json",
190
+ output_data_filename="results.json",
191
+ model="gpt-4o-mini"
192
+ )
193
+
194
+ class Output(BaseModel):
195
+ entities: list[str]
196
+
197
+ runner = BatchRunner(config, output_model=Output)
198
+ runner.run()
199
+ ```
200
+
201
+ ---
202
+
203
+ ## 🤝 Contributing
204
+
205
+ Contributions are welcome!
206
+ Feel free to **open issues, suggest new features, or submit pull requests**.
207
+
208
+ ---
209
+
210
+ ## 🌿 License
211
+
212
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
@@ -0,0 +1,34 @@
1
+ hamtaa_texttools-1.2.0.dist-info/licenses/LICENSE,sha256=Hb2YOBKy2MJQLnyLrX37B4ZVuac8eaIcE71SvVIMOLg,1082
2
+ texttools/__init__.py,sha256=4z7wInlrgbGSlWlXHQNeZMCGQH1sN2xtARsbgLHOLd8,283
3
+ texttools/models.py,sha256=5eT2cSrFq8Xa38kANznV7gbi7lwB2PoDxciLKTpsd6c,2516
4
+ texttools/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
5
+ texttools/batch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ texttools/batch/config.py,sha256=GDDXuhRZ_bOGVwSIlU4tWP247tx1_A7qzLJn7VqDyLU,1050
7
+ texttools/batch/manager.py,sha256=XZtf8UkdClfQlnRKne4nWEcFvdSKE67EamEePKy7jwI,8730
8
+ texttools/batch/runner.py,sha256=9qxXIMfYRXW5SXDqqKtRr61rnQdYZkbCGqKImhSrY6I,9923
9
+ texttools/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
10
+ texttools/core/engine.py,sha256=WhEtxZjYEbCnD0gExGRR4eSmAY4J05E9csovt2Qqlm8,9281
11
+ texttools/core/exceptions.py,sha256=6SDjUL1rmd3ngzD3ytF4LyTRj3bQMSFR9ECrLoqXXHw,395
12
+ texttools/core/internal_models.py,sha256=aExdLvhXhSev8NY1kuAJckeXdFBEisQtKZPxybd3rW8,1703
13
+ texttools/core/operators/async_operator.py,sha256=wFs7eZ9QJrL0jBOu00YffgfPnIrCSavNjecSorXh-mE,6452
14
+ texttools/core/operators/sync_operator.py,sha256=NaUS-aLh3y0QNMiKut4qtcSZKYXbuPbw0o2jvPsYKdY,6357
15
+ texttools/prompts/categorize.yaml,sha256=42Rp3SgVHaDLKrJ27_uK788LiQud0pOXJthz4r0a40Y,1214
16
+ texttools/prompts/check_fact.yaml,sha256=zWFQDRhEE1ij9wSeeenS9YSTM-bY5zzUaG390zUgmcs,714
17
+ texttools/prompts/extract_entities.yaml,sha256=_zYKHNJDIzVDI_-TnwFCKyMs-XLM5igvmWhvSTc3INQ,637
18
+ texttools/prompts/extract_keywords.yaml,sha256=1o4u3uwzapNtB1BUpNIRL5qtrwjW0Yhvyq0TZJiafdg,3272
19
+ texttools/prompts/is_question.yaml,sha256=jnPARd2ZiulLzHW_r4WAsz3sOryfz6Gy5-yYXp-2hd0,496
20
+ texttools/prompts/merge_questions.yaml,sha256=l9Q2OEjPp3SDkxbq3zZCj2ZmXacWSnmYMpUr3l6r5yE,1816
21
+ texttools/prompts/propositionize.yaml,sha256=nbGAfbm1-2Hoc0JLtqZi-S7VHQfnMmuTKI7dZeBxQW0,1403
22
+ texttools/prompts/rewrite.yaml,sha256=klEm8MqXK-Bo8RsS5R9KLMT0zlD-BKo_G6tz9lpAcEY,5420
23
+ texttools/prompts/run_custom.yaml,sha256=IETY9H0wPGWIIzcnupfbwwKQblwZrbYAxB754W9MhgU,125
24
+ texttools/prompts/subject_to_question.yaml,sha256=AK16pZW9HUppIF8JBSEenbUNOU3aqeVV781_WUXnLqk,1160
25
+ texttools/prompts/summarize.yaml,sha256=rPh060Bx_yI1W2JNg-nr83LUk9itatYLKM8ciH2pOvg,486
26
+ texttools/prompts/text_to_question.yaml,sha256=pUwPgK9l5f8S4E5fCht9JY7PFVK2aY1InPfASr7R5o4,1017
27
+ texttools/prompts/translate.yaml,sha256=Dd5bs3O8SI-FlVSwHMYGeEjMmdOWeRlcfBHkhixCx7c,665
28
+ texttools/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
29
+ texttools/tools/async_tools.py,sha256=DonASaaOPbWp7Gh1UY4RlP3yPoYTuhJtVmLns8KYupE,42949
30
+ texttools/tools/sync_tools.py,sha256=y4nMlabgvRapb-YFoiGA5-5HflKrRHttiWSHpkg9tug,42742
31
+ hamtaa_texttools-1.2.0.dist-info/METADATA,sha256=vN4XmIWdH6mdAGfgSkjRdQLEoFNzbhhH32jOyEv9H6w,7846
32
+ hamtaa_texttools-1.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
33
+ hamtaa_texttools-1.2.0.dist-info/top_level.txt,sha256=5Mh0jIxxZ5rOXHGJ6Mp-JPKviywwN0MYuH0xk5bEWqE,10
34
+ hamtaa_texttools-1.2.0.dist-info/RECORD,,
texttools/__init__.py CHANGED
@@ -1,7 +1,7 @@
1
- from .batch.batch_runner import BatchJobRunner
2
- from .batch.batch_config import BatchConfig
3
- from .tools.sync_tools import TheTool
1
+ from .batch.config import BatchConfig
2
+ from .batch.runner import BatchRunner
3
+ from .models import CategoryTree
4
4
  from .tools.async_tools import AsyncTheTool
5
- from .tools.internals.models import CategoryTree
5
+ from .tools.sync_tools import TheTool
6
6
 
7
- __all__ = ["TheTool", "AsyncTheTool", "BatchJobRunner", "BatchConfig", "CategoryTree"]
7
+ __all__ = ["TheTool", "AsyncTheTool", "CategoryTree", "BatchRunner", "BatchConfig"]
File without changes
@@ -1,7 +1,21 @@
1
- from dataclasses import dataclass
2
1
  from collections.abc import Callable
2
+ from dataclasses import dataclass
3
+ from typing import Any
4
+
3
5
 
4
- from texttools.batch.internals.utils import import_data, export_data
6
+ def export_data(data) -> list[dict[str, str]]:
7
+ """
8
+ Produces a structure of the following form from an initial data structure:
9
+ [{"id": str, "text": str},...]
10
+ """
11
+ return data
12
+
13
+
14
+ def import_data(data) -> Any:
15
+ """
16
+ Takes the output and adds and aggregates it to the original structure.
17
+ """
18
+ return data
5
19
 
6
20
 
7
21
  @dataclass
@@ -1,12 +1,12 @@
1
1
  import json
2
+ import logging
2
3
  import uuid
3
4
  from pathlib import Path
4
5
  from typing import Any, Type, TypeVar
5
- import logging
6
6
 
7
- from pydantic import BaseModel
8
7
  from openai import OpenAI
9
8
  from openai.lib._pydantic import to_strict_json_schema
9
+ from pydantic import BaseModel
10
10
 
11
11
  # Base Model type for output models
12
12
  T = TypeVar("T", bound=BaseModel)
@@ -1,17 +1,18 @@
1
1
  import json
2
+ import logging
2
3
  import os
3
4
  import time
4
5
  from pathlib import Path
5
6
  from typing import Any, Type, TypeVar
6
- import logging
7
7
 
8
8
  from dotenv import load_dotenv
9
9
  from openai import OpenAI
10
10
  from pydantic import BaseModel
11
11
 
12
- from texttools.batch.internals.batch_manager import BatchManager
13
- from texttools.batch.batch_config import BatchConfig
14
- from texttools.tools.internals.models import StrOutput
12
+ from ..core.exceptions import TextToolsError
13
+ from ..core.internal_models import Str
14
+ from .config import BatchConfig
15
+ from .manager import BatchManager
15
16
 
16
17
  # Base Model type for output models
17
18
  T = TypeVar("T", bound=BaseModel)
@@ -19,30 +20,34 @@ T = TypeVar("T", bound=BaseModel)
19
20
  logger = logging.getLogger("texttools.batch_runner")
20
21
 
21
22
 
22
- class BatchJobRunner:
23
+ class BatchRunner:
23
24
  """
24
25
  Handles running batch jobs using a batch manager and configuration.
25
26
  """
26
27
 
27
28
  def __init__(
28
- self, config: BatchConfig = BatchConfig(), output_model: Type[T] = StrOutput
29
+ self, config: BatchConfig = BatchConfig(), output_model: Type[T] = Str
29
30
  ):
30
- self._config = config
31
- self._system_prompt = config.system_prompt
32
- self._job_name = config.job_name
33
- self._input_data_path = config.input_data_path
34
- self._output_data_filename = config.output_data_filename
35
- self._model = config.model
36
- self._output_model = output_model
37
- self._manager = self._init_manager()
38
- self._data = self._load_data()
39
- self._parts: list[list[dict[str, Any]]] = []
40
- # Map part index to job name
41
- self._part_idx_to_job_name: dict[int, str] = {}
42
- # Track retry attempts per part
43
- self._part_attempts: dict[int, int] = {}
44
- self._partition_data()
45
- Path(self._config.BASE_OUTPUT_DIR).mkdir(parents=True, exist_ok=True)
31
+ try:
32
+ self._config = config
33
+ self._system_prompt = config.system_prompt
34
+ self._job_name = config.job_name
35
+ self._input_data_path = config.input_data_path
36
+ self._output_data_filename = config.output_data_filename
37
+ self._model = config.model
38
+ self._output_model = output_model
39
+ self._manager = self._init_manager()
40
+ self._data = self._load_data()
41
+ self._parts: list[list[dict[str, Any]]] = []
42
+ # Map part index to job name
43
+ self._part_idx_to_job_name: dict[int, str] = {}
44
+ # Track retry attempts per part
45
+ self._part_attempts: dict[int, int] = {}
46
+ self._partition_data()
47
+ Path(self._config.BASE_OUTPUT_DIR).mkdir(parents=True, exist_ok=True)
48
+
49
+ except Exception as e:
50
+ raise TextToolsError(f"Batch runner initialization failed: {e}")
46
51
 
47
52
  def _init_manager(self) -> BatchManager:
48
53
  load_dotenv()
@@ -162,56 +167,62 @@ class BatchJobRunner:
162
167
 
163
168
  Submits jobs, monitors progress, handles retries, and saves results.
164
169
  """
165
- # Submit all jobs up-front for concurrent execution
166
- self._submit_all_jobs()
167
- pending_parts: set[int] = set(self._part_idx_to_job_name.keys())
168
- logger.info(f"Pending parts: {sorted(pending_parts)}")
169
- # Polling loop
170
- while pending_parts:
171
- finished_this_round: list[int] = []
172
- for part_idx in list(pending_parts):
173
- job_name = self._part_idx_to_job_name[part_idx]
174
- status = self._manager.check_status(job_name=job_name)
175
- logger.info(f"Status for {job_name}: {status}")
176
- if status == "completed":
177
- logger.info(
178
- f"Job completed. Fetching results for part {part_idx + 1}..."
179
- )
180
- output_data, log = self._manager.fetch_results(
181
- job_name=job_name, remove_cache=False
182
- )
183
- output_data = self._config.import_function(output_data)
184
- self._save_results(output_data, log, part_idx)
185
- logger.info(f"Fetched and saved results for part {part_idx + 1}.")
186
- finished_this_round.append(part_idx)
187
- elif status == "failed":
188
- attempt = self._part_attempts.get(part_idx, 0) + 1
189
- self._part_attempts[part_idx] = attempt
190
- if attempt <= self._config.max_retries:
170
+ try:
171
+ # Submit all jobs up-front for concurrent execution
172
+ self._submit_all_jobs()
173
+ pending_parts: set[int] = set(self._part_idx_to_job_name.keys())
174
+ logger.info(f"Pending parts: {sorted(pending_parts)}")
175
+ # Polling loop
176
+ while pending_parts:
177
+ finished_this_round: list[int] = []
178
+ for part_idx in list(pending_parts):
179
+ job_name = self._part_idx_to_job_name[part_idx]
180
+ status = self._manager.check_status(job_name=job_name)
181
+ logger.info(f"Status for {job_name}: {status}")
182
+ if status == "completed":
191
183
  logger.info(
192
- f"Job {job_name} failed (attempt {attempt}). Retrying after short backoff..."
184
+ f"Job completed. Fetching results for part {part_idx + 1}..."
193
185
  )
194
- self._manager._clear_state(job_name)
195
- time.sleep(10)
196
- payload = self._to_manager_payload(self._parts[part_idx])
197
- new_job_name = (
198
- f"{self._job_name}_part_{part_idx + 1}_retry_{attempt}"
186
+ output_data, log = self._manager.fetch_results(
187
+ job_name=job_name, remove_cache=False
199
188
  )
200
- self._manager.start(payload, job_name=new_job_name)
201
- self._part_idx_to_job_name[part_idx] = new_job_name
202
- else:
189
+ output_data = self._config.import_function(output_data)
190
+ self._save_results(output_data, log, part_idx)
203
191
  logger.info(
204
- f"Job {job_name} failed after {attempt - 1} retries. Marking as failed."
192
+ f"Fetched and saved results for part {part_idx + 1}."
205
193
  )
206
194
  finished_this_round.append(part_idx)
207
- else:
208
- # Still running or queued
209
- continue
210
- # Remove finished parts
211
- for part_idx in finished_this_round:
212
- pending_parts.discard(part_idx)
213
- if pending_parts:
214
- logger.info(
215
- f"Waiting {self._config.poll_interval_seconds}s before next status check for parts: {sorted(pending_parts)}"
216
- )
217
- time.sleep(self._config.poll_interval_seconds)
195
+ elif status == "failed":
196
+ attempt = self._part_attempts.get(part_idx, 0) + 1
197
+ self._part_attempts[part_idx] = attempt
198
+ if attempt <= self._config.max_retries:
199
+ logger.info(
200
+ f"Job {job_name} failed (attempt {attempt}). Retrying after short backoff..."
201
+ )
202
+ self._manager._clear_state(job_name)
203
+ time.sleep(10)
204
+ payload = self._to_manager_payload(self._parts[part_idx])
205
+ new_job_name = (
206
+ f"{self._job_name}_part_{part_idx + 1}_retry_{attempt}"
207
+ )
208
+ self._manager.start(payload, job_name=new_job_name)
209
+ self._part_idx_to_job_name[part_idx] = new_job_name
210
+ else:
211
+ logger.info(
212
+ f"Job {job_name} failed after {attempt - 1} retries. Marking as failed."
213
+ )
214
+ finished_this_round.append(part_idx)
215
+ else:
216
+ # Still running or queued
217
+ continue
218
+ # Remove finished parts
219
+ for part_idx in finished_this_round:
220
+ pending_parts.discard(part_idx)
221
+ if pending_parts:
222
+ logger.info(
223
+ f"Waiting {self._config.poll_interval_seconds}s before next status check for parts: {sorted(pending_parts)}"
224
+ )
225
+ time.sleep(self._config.poll_interval_seconds)
226
+
227
+ except Exception as e:
228
+ raise TextToolsError(f"Batch job execution failed: {e}")
File without changes