hamtaa-texttools 0.1.44__py3-none-any.whl → 0.1.46__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of hamtaa-texttools might be problematic. Click here for more details.

@@ -1,12 +1,12 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: hamtaa-texttools
3
- Version: 0.1.44
3
+ Version: 0.1.46
4
4
  Summary: A set of high-level NLP tools
5
5
  Author: Tohidi, Montazer, Givechi, Mousavinezhad
6
6
  Requires-Python: >=3.8
7
7
  Description-Content-Type: text/markdown
8
- Requires-Dist: openai>=1.97.0
9
- Requires-Dist: numpy>=1.26.4
8
+ Requires-Dist: openai==1.97.1
9
+ Requires-Dist: numpy==1.26.4
10
10
 
11
11
  # Text Tools
12
12
 
@@ -1,4 +1,4 @@
1
- texttools/__init__.py,sha256=cI10Q_zaM9DPUCVOM79gZceuyt6Pjgpj3R-AG7xgUM8,778
1
+ texttools/__init__.py,sha256=ZaopVfEJlp9n9mgtdYC0JcpRwAMuf99lUD8VsgemT20,784
2
2
  texttools/base/__init__.py,sha256=KUGm-Oe0BxlrRhPS-Jm2q1NCmwX8MdtZtloia7bcLaM,189
3
3
  texttools/base/base_categorizer.py,sha256=ojup94iXLxh92TjiJmrFXeRbsWKlon7PPAqez96B1bs,1130
4
4
  texttools/base/base_keyword_extractor.py,sha256=uKpxb3xI-sim-vXWe1R4_36QRhSNsWDR4IuVdpkZMME,868
@@ -11,9 +11,6 @@ texttools/base/base_router.py,sha256=pFDjIXFqAhPiS9Onu5py_GxOq8geDGJDQh6k6IhCkvw
11
11
  texttools/base/base_summarizer.py,sha256=7NAilhUPs6ZUwkBpTtXAj6n2XxQH1w6SOolf3gQX2gc,1627
12
12
  texttools/base/base_task_performer.py,sha256=3-6qshkie50S7pRG4WHRNC_RdUbSmHOPKW56CD92-rM,1852
13
13
  texttools/base/base_translator.py,sha256=BoOxqaoPoUs8t1O3m2yL9pQa5iwisl097immTVcGZoE,1020
14
- texttools/batch_manager/__init__.py,sha256=3ZkxA395lRD4gNxJ1vp0fNuz_XuBr50GoP51rrwQ0Ks,87
15
- texttools/batch_manager/batch_manager.py,sha256=jAmKskL3OTYwwsO1mWsWAB3VxMlOF07c2GW1Ev83ZhY,9283
16
- texttools/batch_manager/batch_runner.py,sha256=kW0IPauI11xpssApMA7b4XI19FePImywym3V7tBaa-o,7404
17
14
  texttools/formatter/__init__.py,sha256=KHz2tFZctbit_HVbQNCTMi46JzmKlg-uB6Ost63IpVU,46
18
15
  texttools/formatter/base.py,sha256=0fiM6E7NdJevAVpL6yyPaUZVJGKWxE3fr-Ay1oqgJqQ,879
19
16
  texttools/formatter/gemma3_formatter.py,sha256=AmdKBYLj6HMsI2DDX4KHNEEVYJmz_VVNUBOv8ScGjsY,1865
@@ -53,8 +50,12 @@ texttools/tools/summarizer/__init__.py,sha256=phrR7qO20CNhO3hjXQBzhTRVumdVdGSufm
53
50
  texttools/tools/summarizer/gemma_summarizer.py,sha256=ikhsBv7AiZD1dT_d12AyjXxojzSW92e2y5WjchI_3bE,4474
54
51
  texttools/tools/summarizer/llm_summerizer.py,sha256=-0rUKbSnl1aDeBfJ5DCSbIlwd2k-9qIaCKgoQJa0hWc,3412
55
52
  texttools/tools/translator/__init__.py,sha256=KO1m08J2BZwRqBGO9ICB4l4cnH1jfHLHL5HbgYFUWM8,72
56
- texttools/tools/translator/gemma_translator.py,sha256=57NMfJAZHQjZSr_eCBePE_Pnag8pu3O00Jicxhzn6Jc,7572
57
- hamtaa_texttools-0.1.44.dist-info/METADATA,sha256=OImC1zmuJh7p8SY3s3mhm8poOzYOuuqx6vjOeDy5O3k,1481
58
- hamtaa_texttools-0.1.44.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
59
- hamtaa_texttools-0.1.44.dist-info/top_level.txt,sha256=5Mh0jIxxZ5rOXHGJ6Mp-JPKviywwN0MYuH0xk5bEWqE,10
60
- hamtaa_texttools-0.1.44.dist-info/RECORD,,
53
+ texttools/tools/translator/gemma_translator.py,sha256=rbP0kgkhOiEPdHWgHQc7Lev7lrAIYqNb6t_OfZLp44E,7180
54
+ texttools/utils/flex_processor.py,sha256=Y44uTracvXUJiUm5hh57Uk0933RU9GTc3dN_1Bo_XQA,3214
55
+ texttools/utils/batch_manager/__init__.py,sha256=3ZkxA395lRD4gNxJ1vp0fNuz_XuBr50GoP51rrwQ0Ks,87
56
+ texttools/utils/batch_manager/batch_manager.py,sha256=jAmKskL3OTYwwsO1mWsWAB3VxMlOF07c2GW1Ev83ZhY,9283
57
+ texttools/utils/batch_manager/batch_runner.py,sha256=kW0IPauI11xpssApMA7b4XI19FePImywym3V7tBaa-o,7404
58
+ hamtaa_texttools-0.1.46.dist-info/METADATA,sha256=zZG-0IaOyeEFQO0rhBpT194Jsst-uuqRevcizIm8tiI,1481
59
+ hamtaa_texttools-0.1.46.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
60
+ hamtaa_texttools-0.1.46.dist-info/top_level.txt,sha256=5Mh0jIxxZ5rOXHGJ6Mp-JPKviywwN0MYuH0xk5bEWqE,10
61
+ hamtaa_texttools-0.1.46.dist-info/RECORD,,
texttools/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
- from texttools.batch_manager import BatchJobRunner, SimpleBatchManager
1
+ from texttools.utils.batch_manager import BatchJobRunner, SimpleBatchManager
2
2
  from texttools.handlers import (
3
3
  NoOpResultHandler,
4
4
  PrintResultHandler,
@@ -1,5 +1,3 @@
1
- import json
2
- import re
3
1
  from typing import Any, Optional
4
2
 
5
3
  from openai import OpenAI
@@ -35,7 +33,7 @@ class GemmaTranslator(BaseTranslator):
35
33
  **client_kwargs: Any,
36
34
  ):
37
35
  super().__init__(handlers)
38
- self.client = client
36
+ self.client: OpenAI = client
39
37
  self.model = model
40
38
  self.temperature = temperature
41
39
  self.client_kwargs = client_kwargs
@@ -134,27 +132,16 @@ class GemmaTranslator(BaseTranslator):
134
132
  messages.append({"role": "user", "content": text_prompt})
135
133
 
136
134
  restructured = self.chat_formatter.format(messages=messages)
137
- completion = self.client.chat.completions.create(
135
+ completion = self.client.chat.completions.parse(
138
136
  model=self.model,
139
137
  messages=restructured,
140
- response_format={
141
- "type": "json_schema",
142
- "json_schema": {
143
- "name": "NER",
144
- "schema": PreprocessorOutput.model_json_schema(),
145
- },
146
- },
138
+ response_format=PreprocessorOutput,
147
139
  temperature=self.temperature,
148
- **self.client_kwargs,
140
+ extra_body=dict(guided_decoding_backend="auto") ** self.client_kwargs,
149
141
  )
150
- response = completion.choices[0].message.content
151
-
152
- # Remove Markdown-style triple backticks and any optional language tag like "json"
153
- if response.startswith("```"):
154
- response = re.sub(r"^```(?:json)?\s*|```$", "", response.strip())
155
-
156
- entities = json.loads(response)
142
+ message = completion.choices[0].message
157
143
 
144
+ entities = message.parsed
158
145
  return entities
159
146
 
160
147
  def translate(
@@ -189,7 +176,7 @@ class GemmaTranslator(BaseTranslator):
189
176
  temperature=self.temperature,
190
177
  **self.client_kwargs,
191
178
  )
192
- response = completion.choices[0].message.content.strip()
179
+ response = completion.choices[0].message.content
193
180
 
194
181
  self._dispatch(
195
182
  {
@@ -0,0 +1,78 @@
1
+ import random
2
+ import asyncio
3
+ from openai import OpenAI, RateLimitError, APIError
4
+ from typing import Optional
5
+ from pydantic import BaseModel, ValidationError
6
+ import httpx
7
+
8
+ # http_client = httpx()
9
+ # test_client = OpenAI(http_client=http_client)
10
+
11
async def flex_processing(
    LLM_client: "OpenAI",
    system_prompt: str,
    user_prompt: str,
    output_model: Optional["type[BaseModel]"] = None,
    prompt_cache_key: Optional[str] = None,
    max_retries: int = 10,
    base_delay: float = 2.0,
    model_name: Optional[str] = "gpt-5-mini",
    **client_kwargs,
):
    """
    Call the chat-completions API on the "flex" service tier with retry and
    exponential backoff.

    Rate-limit / transient API errors and invalid structured output are
    retried with jittered exponential backoff; any other exception is
    treated as unrecoverable and re-raised as RuntimeError.

    Args:
        LLM_client: OpenAI client instance used to issue the request.
        system_prompt: Content of the system message.
        user_prompt: Content of the user message.
        output_model: Optional pydantic model *class* passed as
            ``response_format``; when given, the parsed instance is returned.
        prompt_cache_key: Optional key forwarded for prompt caching.
        max_retries: Maximum number of attempts before giving up.
        base_delay: Base (seconds) of the exponential backoff delay.
        model_name: Model identifier to request.
        **client_kwargs: Extra keyword arguments forwarded to the API call.

    Returns:
        The parsed ``output_model`` instance when ``output_model`` is given,
        otherwise the raw message content string.

    Raises:
        RuntimeError: On an unrecoverable error, or once all retries are
            exhausted.
    """
    for attempt in range(max_retries):
        try:
            request_kwargs = {
                "model": model_name,
                "messages": [
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt},
                ],
                "service_tier": "flex",
                "timeout": 900.0,  # flex tier can queue; allow long waits
                **client_kwargs,
            }
            if output_model:
                request_kwargs["response_format"] = output_model
            if prompt_cache_key:
                request_kwargs["prompt_cache_key"] = prompt_cache_key

            response = LLM_client.chat.completions.parse(**request_kwargs)
            content = response.choices[0].message.content

            # BUGFIX: previously the unstructured path never returned, so a
            # successful plain-text response was retried max_retries times
            # and then raised RuntimeError.
            if output_model is None:
                return content

            # Structured path: validate the raw JSON, then return the
            # SDK-parsed model instance. Invalid output is retryable.
            try:
                output_model.model_validate_json(content)
                return response.choices[0].message.parsed
            except ValidationError as ve:
                wait_time = base_delay * (2 ** attempt) + random.uniform(0, 1)
                print(
                    f"[Flex Retry] Attempt {attempt+1}/{max_retries} produced invalid structured output. "
                    f"Retrying in {wait_time:.2f}s... (ValidationError: {ve})"
                )
                await asyncio.sleep(wait_time)
        except (RateLimitError, APIError) as e:
            # Transient server-side failure: back off and retry.
            wait_time = base_delay * (2 ** attempt) + random.uniform(0, 1)
            print(
                f"[Flex Retry] Attempt {attempt+1}/{max_retries} failed "
                f"with error: {type(e).__name__} - {e}. "
                f"Retrying in {wait_time:.2f}s..."
            )
            await asyncio.sleep(wait_time)
        except Exception as e:
            # Non-recoverable error: surface immediately with context.
            raise RuntimeError(
                f"[Flex Processing] Unrecoverable error for prompt_key={prompt_cache_key}: {e}"
            ) from e

    raise RuntimeError(
        f"[Flex Processing] Exhausted {max_retries} retries for prompt_key={prompt_cache_key}"
    )