hamtaa-texttools 1.1.17__py3-none-any.whl → 1.1.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {hamtaa_texttools-1.1.17.dist-info → hamtaa_texttools-1.1.18.dist-info}/METADATA +1 -1
- {hamtaa_texttools-1.1.17.dist-info → hamtaa_texttools-1.1.18.dist-info}/RECORD +16 -15
- texttools/__init__.py +1 -1
- texttools/batch/batch_runner.py +75 -64
- texttools/{tools/internals → internals}/async_operator.py +96 -48
- texttools/internals/exceptions.py +28 -0
- texttools/{tools/internals → internals}/models.py +2 -2
- texttools/internals/prompt_loader.py +80 -0
- texttools/{tools/internals → internals}/sync_operator.py +92 -47
- texttools/tools/async_tools.py +551 -341
- texttools/tools/sync_tools.py +548 -339
- texttools/tools/internals/prompt_loader.py +0 -56
- {hamtaa_texttools-1.1.17.dist-info → hamtaa_texttools-1.1.18.dist-info}/WHEEL +0 -0
- {hamtaa_texttools-1.1.17.dist-info → hamtaa_texttools-1.1.18.dist-info}/licenses/LICENSE +0 -0
- {hamtaa_texttools-1.1.17.dist-info → hamtaa_texttools-1.1.18.dist-info}/top_level.txt +0 -0
- /texttools/{tools/internals → internals}/formatters.py +0 -0
- /texttools/{tools/internals → internals}/operator_utils.py +0 -0
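
The main structural change in 1.1.18 is that the internal helpers move from texttools/tools/internals/ up to texttools/internals/, alongside a new exceptions.py and a rewritten prompt_loader.py (the old 56-line module is deleted, an 80-line one replaces it). For any code that imported these internal modules directly (they are internal, so this should be rare), the import paths shift roughly as in this sketch; the symbol names are taken from the hunks below:

    # 1.1.17 layout (old paths, now removed):
    #   from texttools.tools.internals.models import ToolOutput
    #   from texttools.tools.internals.prompt_loader import PromptLoader

    # 1.1.18 layout (new paths, as imported in the diff below):
    from texttools.internals.models import ToolOutput
    from texttools.internals.prompt_loader import PromptLoader
    from texttools.internals.exceptions import TextToolsError, LLMError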
@@ -5,15 +5,21 @@ import logging
 from openai import OpenAI
 from pydantic import BaseModel

-from texttools.
-from texttools.
-from texttools.
-from texttools.
+from texttools.internals.models import ToolOutput
+from texttools.internals.operator_utils import OperatorUtils
+from texttools.internals.formatters import Formatter
+from texttools.internals.prompt_loader import PromptLoader
+from texttools.internals.exceptions import (
+    TextToolsError,
+    LLMError,
+    ValidationError,
+    PromptError,
+)

 # Base Model type for output models
 T = TypeVar("T", bound=BaseModel)

-logger = logging.getLogger("texttools.
+logger = logging.getLogger("texttools.sync_operator")


 class Operator:
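
The four exception classes imported above come from the new texttools/internals/exceptions.py (+28 lines), whose body is not included in this excerpt. Judging only from how they are raised in the hunks below, a plausible sketch of that module is a small hierarchy like this (an assumption, not the actual file contents):

    # Hypothetical reconstruction of texttools/internals/exceptions.py,
    # inferred from the names and usage visible in this diff.

    class TextToolsError(Exception):
        """Base class for all texttools errors."""

    class LLMError(TextToolsError):
        """Raised when the LLM call fails or returns an unusable response."""

    class ValidationError(TextToolsError):
        """Raised when the result never passes the caller-supplied validator."""

    class PromptError(TextToolsError):
        """Raised when a prompt template is missing or empty."""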
@@ -35,15 +41,33 @@ class Operator:
         Calls OpenAI API for analysis using the configured prompt template.
         Returns the analyzed content as a string.
         """
-
-
-
-
-
-
-
-
-
+        try:
+            analyze_prompt = prompt_configs["analyze_template"]
+
+            if not analyze_prompt:
+                raise PromptError("Analyze template is empty")
+
+            analyze_message = [OperatorUtils.build_user_message(analyze_prompt)]
+            completion = self._client.chat.completions.create(
+                model=self._model,
+                messages=analyze_message,
+                temperature=temperature,
+            )
+
+            if not completion.choices:
+                raise LLMError("No choices returned from LLM")
+
+            analysis = completion.choices[0].message.content.strip()
+
+            if not analysis:
+                raise LLMError("Empty analysis response")
+
+            return analysis.strip()
+
+        except Exception as e:
+            if isinstance(e, (PromptError, LLMError)):
+                raise
+            raise LLMError(f"Analysis failed: {e}")

     def _parse_completion(
         self,
@@ -58,23 +82,35 @@ class Operator:
         Parses a chat completion using OpenAI's structured output format.
         Returns both the parsed object and the raw completion for logprobs.
         """
-
-
-
-
-
-
+        try:
+            request_kwargs = {
+                "model": self._model,
+                "messages": message,
+                "response_format": output_model,
+                "temperature": temperature,
+            }
+
+            if logprobs:
+                request_kwargs["logprobs"] = True
+                request_kwargs["top_logprobs"] = top_logprobs
+            if priority:
+                request_kwargs["extra_body"] = {"priority": priority}
+            completion = self._client.beta.chat.completions.parse(**request_kwargs)
+
+            if not completion.choices:
+                raise LLMError("No choices returned from LLM")
+
+            parsed = completion.choices[0].message.parsed

-
-
-            request_kwargs["top_logprobs"] = top_logprobs
+            if not parsed:
+                raise LLMError("Failed to parse LLM response")

-
-            request_kwargs["extra_body"] = {"priority": priority}
+            return parsed, completion

-
-
-
+        except Exception as e:
+            if isinstance(e, LLMError):
+                raise
+            raise LLMError(f"Completion failed: {e}")

     def run(
         self,
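
_parse_completion now builds a single kwargs dict around the OpenAI SDK's structured-output helper: the Pydantic output_model is passed as response_format, logprobs/top_logprobs are attached only when requested, and a backend-specific priority field rides along in extra_body. A minimal standalone sketch of the same call pattern, with a made-up Answer model and placeholder model name (both are assumptions, not texttools code):

    from openai import OpenAI
    from pydantic import BaseModel

    class Answer(BaseModel):
        result: str  # mirrors the `parsed.result` field used later in the diff

    client = OpenAI()
    request_kwargs = {
        "model": "gpt-4o-mini",  # placeholder; the operator uses self._model
        "messages": [{"role": "user", "content": "Summarize this text ..."}],
        "response_format": Answer,
        "temperature": 0.2,
        "logprobs": True,
        "top_logprobs": 3,
        "extra_body": {"priority": 1},  # only meaningful to servers that accept a priority field
    }
    completion = client.beta.chat.completions.parse(**request_kwargs)
    parsed = completion.choices[0].message.parsed  # an Answer instance, or None if parsing failed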
@@ -96,12 +132,13 @@ class Operator:
         **extra_kwargs,
     ) -> ToolOutput:
         """
-        Execute the LLM pipeline with the given input text.
+        Execute the LLM pipeline with the given input text. (Sync)
         """
-        prompt_loader = PromptLoader()
-        formatter = Formatter()
-        output = ToolOutput()
         try:
+            prompt_loader = PromptLoader()
+            formatter = Formatter()
+            output = ToolOutput()
+
             # Prompt configs contain two keys: main_template and analyze template, both are string
             prompt_configs = prompt_loader.load(
                 prompt_file=prompt_file,
@@ -140,6 +177,9 @@ class Operator:

             messages = formatter.user_merge_format(messages)

+            if logprobs and (not isinstance(top_logprobs, int) or top_logprobs < 2):
+                raise ValueError("top_logprobs should be an integer greater than 1")
+
             parsed, completion = self._parse_completion(
                 messages, output_model, temperature, logprobs, top_logprobs, priority
             )
@@ -148,6 +188,15 @@ class Operator:

             # Retry logic if validation fails
             if validator and not validator(output.result):
+                if (
+                    not isinstance(max_validation_retries, int)
+                    or max_validation_retries < 1
+                ):
+                    raise ValueError(
+                        "max_validation_retries should be a positive integer"
+                    )
+
+                succeeded = False
                 for attempt in range(max_validation_retries):
                     logger.warning(
                         f"Validation failed, retrying for the {attempt + 1} time."
@@ -155,6 +204,7 @@ class Operator:

                     # Generate new temperature for retry
                     retry_temperature = OperatorUtils.get_retry_temp(temperature)
+
                     try:
                         parsed, completion = self._parse_completion(
                             messages,
@@ -162,28 +212,23 @@ class Operator:
                             retry_temperature,
                             logprobs,
                             top_logprobs,
+                            priority=priority,
                         )

                         output.result = parsed.result

                         # Check if retry was successful
                         if validator(output.result):
-
-                                f"Validation passed on retry attempt {attempt + 1}"
-                            )
+                            succeeded = True
                             break
-                        else:
-                            logger.warning(
-                                f"Validation still failing after retry attempt {attempt + 1}"
-                            )

-                    except
+                    except LLMError as e:
                         logger.error(f"Retry attempt {attempt + 1} failed: {e}")
-                        # Continue to next retry attempt if this one fails

-
-
-
+                if not succeeded:
+                    raise ValidationError(
+                        f"Validation failed after {max_validation_retries} retries"
+                    )

             if logprobs:
                 output.logprobs = OperatorUtils.extract_logprobs(completion)
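
The retry loop above is driven by the caller-supplied validator and max_validation_retries; when no attempt passes, the operator now raises ValidationError instead of quietly returning the last result. A validator is just a predicate over the parsed result, for example (a hypothetical validator, since the public tool signatures are not part of this hunk):

    def nonempty_string(result) -> bool:
        # Hypothetical validator: accept only a non-blank string result.
        return isinstance(result, str) and result.strip() != ""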
@@ -195,7 +240,7 @@ class Operator:

             return output

+        except (PromptError, LLMError, ValidationError):
+            raise
         except Exception as e:
-
-            output.errors.append(str(e))
-            return output
+            raise TextToolsError(f"Unexpected error in operator: {e}")
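
This last hunk changes the failure mode of run: 1.1.17 appended errors to output.errors and still returned the ToolOutput, while 1.1.18 re-raises everything as a TextToolsError subclass. Callers that used to inspect output.errors would switch to exception handling, roughly as sketched here (the operator argument is a stand-in for however the tool layer invokes Operator.run):

    from texttools.internals.exceptions import TextToolsError, ValidationError

    def run_safely(operator, **kwargs):
        # Illustrates the new failure mode: exceptions instead of output.errors.
        try:
            return operator.run(**kwargs)
        except ValidationError:
            return None  # result never passed the validator, even after retries
        except TextToolsError:
            return None  # prompt, LLM, or unexpected operator failures now land here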