hamtaa-texttools 1.0.4__py3-none-any.whl → 1.0.5__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of hamtaa-texttools might be problematic.

Files changed (32)
  1. {hamtaa_texttools-1.0.4.dist-info → hamtaa_texttools-1.0.5.dist-info}/METADATA +192 -141
  2. hamtaa_texttools-1.0.5.dist-info/RECORD +30 -0
  3. {hamtaa_texttools-1.0.4.dist-info → hamtaa_texttools-1.0.5.dist-info}/licenses/LICENSE +20 -20
  4. {hamtaa_texttools-1.0.4.dist-info → hamtaa_texttools-1.0.5.dist-info}/top_level.txt +0 -0
  5. texttools/__init__.py +9 -9
  6. texttools/batch/__init__.py +4 -4
  7. texttools/batch/batch_manager.py +240 -240
  8. texttools/batch/batch_runner.py +212 -212
  9. texttools/formatters/base_formatter.py +33 -33
  10. texttools/formatters/{user_merge_formatter/user_merge_formatter.py → user_merge_formatter.py} +30 -30
  11. texttools/prompts/README.md +31 -31
  12. texttools/prompts/categorizer.yaml +28 -31
  13. texttools/prompts/custom_tool.yaml +7 -0
  14. texttools/prompts/keyword_extractor.yaml +18 -14
  15. texttools/prompts/ner_extractor.yaml +20 -21
  16. texttools/prompts/question_detector.yaml +13 -14
  17. texttools/prompts/question_generator.yaml +19 -22
  18. texttools/prompts/question_merger.yaml +45 -48
  19. texttools/prompts/rewriter.yaml +111 -0
  20. texttools/prompts/subject_question_generator.yaml +22 -26
  21. texttools/prompts/summarizer.yaml +13 -11
  22. texttools/prompts/translator.yaml +14 -14
  23. texttools/tools/__init__.py +4 -4
  24. texttools/tools/async_the_tool.py +277 -263
  25. texttools/tools/internals/async_operator.py +297 -288
  26. texttools/tools/internals/operator.py +295 -306
  27. texttools/tools/internals/output_models.py +52 -62
  28. texttools/tools/internals/prompt_loader.py +76 -82
  29. texttools/tools/the_tool.py +501 -400
  30. hamtaa_texttools-1.0.4.dist-info/RECORD +0 -29
  31. texttools/prompts/question_rewriter.yaml +0 -46
  32. {hamtaa_texttools-1.0.4.dist-info → hamtaa_texttools-1.0.5.dist-info}/WHEEL +0 -0
texttools/tools/internals/async_operator.py
@@ -1,288 +1,297 @@
- from __future__ import annotations
-
- import json
- import math
- import re
- from typing import Any, Literal, Optional, TypeVar
-
- from openai import AsyncOpenAI
- from pydantic import BaseModel
-
- from texttools.formatters.user_merge_formatter.user_merge_formatter import (
-     UserMergeFormatter,
- )
- from texttools.tools.internals.prompt_loader import PromptLoader
-
- # Base Model type for output models
- T = TypeVar("T", bound=BaseModel)
-
-
- class AsyncOperator:
-     """
-     Async version of Operator.
-
-     Behaves like the synchronous Operator but uses AsyncOpenAI and async/await.
-     """
-
-     def __init__(
-         self,
-         client: AsyncOpenAI,
-         *,
-         model: str,
-         temperature: float = 0.0,
-         **client_kwargs: Any,
-     ):
-         self.client: AsyncOpenAI = client
-         self.model = model
-         self.temperature = temperature
-         self.client_kwargs = client_kwargs
-
-     def _build_user_message(self, prompt: str) -> dict[str, str]:
-         return {"role": "user", "content": prompt}
-
-     async def _analysis_completion(self, analyze_message: list[dict[str, str]]) -> str:
-         try:
-             completion = await self.client.chat.completions.create(
-                 model=self.model,
-                 messages=analyze_message,
-                 temperature=self.temperature,
-                 **self.client_kwargs,
-             )
-             analysis = completion.choices[0].message.content.strip()
-             return analysis
-
-         except Exception as e:
-             print(f"[ERROR] Analysis failed: {e}")
-             raise
-
-     async def _analyze(self, prompt_configs: dict[str, str]) -> str:
-         analyze_prompt = prompt_configs["analyze_template"]
-         analyze_message = [self._build_user_message(analyze_prompt)]
-         analysis = await self._analysis_completion(analyze_message)
-
-         return analysis
-
-     async def _parse_completion(
-         self,
-         message: list[dict[str, str]],
-         output_model: T,
-         logprobs: bool = False,
-         top_logprobs: int = 3,
-     ) -> tuple[T, Any]:
-         try:
-             request_kwargs = {
-                 "model": self.model,
-                 "messages": message,
-                 "response_format": output_model,
-                 "temperature": self.temperature,
-                 **self.client_kwargs,
-             }
-             if logprobs:
-                 request_kwargs["logprobs"] = True
-                 request_kwargs["top_logprobs"] = top_logprobs
-
-             completion = await self.client.beta.chat.completions.parse(**request_kwargs)
-             parsed = completion.choices[0].message.parsed
-             return parsed, completion
-
-         except Exception as e:
-             print(f"[ERROR] Failed to parse completion: {e}")
-             raise
-
-     def _clean_json_response(self, response: str) -> str:
-         """
-         Clean JSON response by removing code block markers and whitespace.
-         Handles cases like:
-         - ```json{"result": "value"}```
-         """
-         cleaned = response.strip()
-
-         # Remove ```json marker
-         if cleaned.startswith("```json"):
-             cleaned = cleaned[7:]
-
-         # Remove trailing ```
-         if cleaned.endswith("```"):
-             cleaned = cleaned[:-3]
-
-         return cleaned.strip()
-
-     def _convert_to_output_model(self, response_string: str, output_model: T) -> T:
-         """
-         Convert a JSON response string to output model.
-
-         Args:
-             response_string: The JSON string (may contain code block markers)
-             output_model: Your Pydantic output model class (e.g., StrOutput, ListStrOutput)
-
-         Returns:
-             Instance of your output model
-         """
-         try:
-             # Clean the response string
-             cleaned_json = self._clean_json_response(response_string)
-
-             # Fix Python-style booleans
-             cleaned_json = cleaned_json.replace("False", "false").replace(
-                 "True", "true"
-             )
-
-             # Convert string to Python dictionary
-             response_dict = json.loads(cleaned_json)
-
-             # Convert dictionary to output model
-             return output_model(**response_dict)
-
-         except json.JSONDecodeError as e:
-             raise ValueError(
-                 f"Failed to parse JSON response: {e}\nResponse: {response_string}"
-             )
-         except Exception as e:
-             raise ValueError(f"Failed to convert to output model: {e}")
-
-     async def _vllm_completion(
-         self,
-         message: list[dict[str, str]],
-         output_model: T,
-         logprobs: bool = False,
-         top_logprobs: int = 3,
-     ) -> tuple[T, Any]:
-         try:
-             json_schema = output_model.model_json_schema()
-
-             # Build kwargs dynamically
-             request_kwargs = {
-                 "model": self.model,
-                 "messages": message,
-                 "extra_body": {"guided_json": json_schema},
-                 "temperature": self.temperature,
-                 **self.client_kwargs,
-             }
-
-             if logprobs:
-                 request_kwargs["logprobs"] = True
-                 request_kwargs["top_logprobs"] = top_logprobs
-
-             completion = await self.client.chat.completions.create(**request_kwargs)
-             response = completion.choices[0].message.content
-
-             # Convert the string response to output model
-             parsed = self._convert_to_output_model(response, output_model)
-
-             return parsed, completion
-
-         except Exception as e:
-             print(f"[ERROR] Failed to get vLLM structured output: {e}")
-             raise
-
-     def _extract_logprobs(self, completion: dict):
-         logprobs_data = []
-         ignore_pattern = re.compile(r'^(result|[\s\[\]\{\}",:]+)$')
-
-         for choice in completion.choices:
-             if not getattr(choice, "logprobs", None):
-                 continue
-
-             for logprob_item in choice.logprobs.content:
-                 if ignore_pattern.match(logprob_item.token):
-                     continue
-                 token_entry = {
-                     "token": logprob_item.token,
-                     "prob": round(math.exp(logprob_item.logprob), 8),
-                     "top_alternatives": [],
-                 }
-                 for alt in logprob_item.top_logprobs:
-                     if ignore_pattern.match(alt.token):
-                         continue
-                     token_entry["top_alternatives"].append(
-                         {
-                             "token": alt.token,
-                             "prob": round(math.exp(alt.logprob), 8),
-                         }
-                     )
-                 logprobs_data.append(token_entry)
-
-         return logprobs_data
-
-     async def run(
-         self,
-         input_text: str,
-         prompt_file: str,
-         output_model: T,
-         with_analysis: bool = False,
-         use_modes: bool = False,
-         mode: str = "",
-         resp_format: Literal["vllm", "parse"] = "parse",
-         output_lang: Optional[str] = None,
-         logprobs: bool = False,
-         top_logprobs: int = 3,
-         **extra_kwargs,
-     ) -> dict[str, Any]:
-         """
-         Execute the async LLM pipeline with the given input text.
-
-         Args:
-             input_text: The text to process (will be stripped of whitespace)
-             **extra_kwargs: Additional variables to inject into prompt templates
-
-         Returns:
-             Dictionary containing the parsed result and optional analysis
-         """
-         prompt_loader = PromptLoader()
-         formatter = UserMergeFormatter()
-
-         try:
-             cleaned_text = input_text.strip()
-
-             prompt_configs = prompt_loader.load_prompts(
-                 prompt_file,
-                 use_modes,
-                 mode,
-                 cleaned_text,
-                 **extra_kwargs,
-             )
-
-             messages: list[dict[str, str]] = []
-
-             if with_analysis:
-                 analysis = await self._analyze(prompt_configs)
-                 messages.append(
-                     self._build_user_message(f"Based on this analysis: {analysis}")
-                 )
-
-             if output_lang:
-                 messages.append(
-                     self._build_user_message(
-                         f"Respond only in the {output_lang} language."
-                     )
-                 )
-
-             messages.append(self._build_user_message(prompt_configs["main_template"]))
-
-             messages = formatter.format(messages)
-
-             if resp_format == "vllm":
-                 parsed, completion = await self._vllm_completion(
-                     messages, output_model, logprobs, top_logprobs
-                 )
-             elif resp_format == "parse":
-                 parsed, completion = await self._parse_completion(
-                     messages, output_model, logprobs, top_logprobs
-                 )
-             else:
-                 raise ValueError(f"Unknown resp_format: {resp_format}")
-
-             results = {"result": parsed.result}
-
-             if logprobs:
-                 results["logprobs"] = self._extract_logprobs(completion)
-
-             if with_analysis:
-                 results["analysis"] = analysis
-
-             return results
-
-         except Exception as e:
-             # Print error clearly and re-raise for the caller to handle
-             print(f"[ERROR] Async operation failed: {e}")
-             raise
+ from __future__ import annotations
+
+ import json
+ import math
+ import re
+ from typing import Any, Literal, Optional, TypeVar
+
+ from openai import AsyncOpenAI
+ from pydantic import BaseModel
+
+ from texttools.formatters.user_merge_formatter import (
+     UserMergeFormatter,
+ )
+ from texttools.tools.internals.prompt_loader import PromptLoader
+
+ # Base Model type for output models
+ T = TypeVar("T", bound=BaseModel)
+
+
+ class AsyncOperator:
+     """
+     Async version of Operator.
+
+     Behaves like the synchronous Operator but uses AsyncOpenAI and async/await.
+     """
+
+     def __init__(
+         self,
+         client: AsyncOpenAI,
+         *,
+         model: str,
+         temperature: float = 0.0,
+         **client_kwargs: Any,
+     ):
+         self.client: AsyncOpenAI = client
+         self.model = model
+         self.temperature = temperature
+         self.client_kwargs = client_kwargs
+
+     def _build_user_message(self, prompt: str) -> dict[str, str]:
+         return {"role": "user", "content": prompt}
+
+     async def _analysis_completion(self, analyze_message: list[dict[str, str]]) -> str:
+         try:
+             completion = await self.client.chat.completions.create(
+                 model=self.model,
+                 messages=analyze_message,
+                 temperature=self.temperature,
+                 **self.client_kwargs,
+             )
+             analysis = completion.choices[0].message.content.strip()
+             return analysis
+
+         except Exception as e:
+             print(f"[ERROR] Analysis failed: {e}")
+             raise
+
+     async def _analyze(self, prompt_configs: dict[str, str]) -> str:
+         analyze_prompt = prompt_configs["analyze_template"]
+         analyze_message = [self._build_user_message(analyze_prompt)]
+         analysis = await self._analysis_completion(analyze_message)
+
+         return analysis
+
+     async def _parse_completion(
+         self,
+         message: list[dict[str, str]],
+         output_model: T,
+         logprobs: bool = False,
+         top_logprobs: int = 3,
+         max_tokens: int | None = None,
+     ) -> tuple[T, Any]:
+         try:
+             request_kwargs = {
+                 "model": self.model,
+                 "messages": message,
+                 "response_format": output_model,
+                 "temperature": self.temperature,
+                 **self.client_kwargs,
+             }
+
+             if max_tokens is not None:
+                 request_kwargs["max_tokens"] = max_tokens
+
+             if logprobs:
+                 request_kwargs["logprobs"] = True
+                 request_kwargs["top_logprobs"] = top_logprobs
+
+             completion = await self.client.beta.chat.completions.parse(**request_kwargs)
+             parsed = completion.choices[0].message.parsed
+             return parsed, completion
+
+         except Exception as e:
+             print(f"[ERROR] Failed to parse completion: {e}")
+             raise
+
+     def _clean_json_response(self, response: str) -> str:
+         """
+         Clean JSON response by removing code block markers and whitespace.
+         Handles cases like:
+         - ```json{"result": "value"}```
+         """
+         cleaned = response.strip()
+
+         # Remove ```json marker
+         if cleaned.startswith("```json"):
+             cleaned = cleaned[7:]
+
+         # Remove trailing ```
+         if cleaned.endswith("```"):
+             cleaned = cleaned[:-3]
+
+         return cleaned.strip()
+
+     def _convert_to_output_model(self, response_string: str, output_model: T) -> T:
+         """
+         Convert a JSON response string to output model.
+
+         Args:
+             response_string: The JSON string (may contain code block markers)
+             output_model: Your Pydantic output model class (e.g., StrOutput, ListStrOutput)
+
+         Returns:
+             Instance of your output model
+         """
+         try:
+             # Clean the response string
+             cleaned_json = self._clean_json_response(response_string)
+
+             # Fix Python-style booleans
+             cleaned_json = cleaned_json.replace("False", "false").replace(
+                 "True", "true"
+             )
+
+             # Convert string to Python dictionary
+             response_dict = json.loads(cleaned_json)
+
+             # Convert dictionary to output model
+             return output_model(**response_dict)
+
+         except json.JSONDecodeError as e:
+             raise ValueError(
+                 f"Failed to parse JSON response: {e}\nResponse: {response_string}"
+             )
+         except Exception as e:
+             raise ValueError(f"Failed to convert to output model: {e}")
+
+     async def _vllm_completion(
+         self,
+         message: list[dict[str, str]],
+         output_model: T,
+         logprobs: bool = False,
+         top_logprobs: int = 3,
+         max_tokens: int | None = None,
+     ) -> tuple[T, Any]:
+         try:
+             json_schema = output_model.model_json_schema()
+
+             # Build kwargs dynamically
+             request_kwargs = {
+                 "model": self.model,
+                 "messages": message,
+                 "extra_body": {"guided_json": json_schema},
+                 "temperature": self.temperature,
+                 **self.client_kwargs,
+             }
+
+             if max_tokens is not None:
+                 request_kwargs["max_tokens"] = max_tokens
+
+             if logprobs:
+                 request_kwargs["logprobs"] = True
+                 request_kwargs["top_logprobs"] = top_logprobs
+
+             completion = await self.client.chat.completions.create(**request_kwargs)
+             response = completion.choices[0].message.content
+
+             # Convert the string response to output model
+             parsed = self._convert_to_output_model(response, output_model)
+
+             return parsed, completion
+
+         except Exception as e:
+             print(f"[ERROR] Failed to get vLLM structured output: {e}")
+             raise
+
+     def _extract_logprobs(self, completion: dict):
+         logprobs_data = []
+         ignore_pattern = re.compile(r'^(result|[\s\[\]\{\}",:]+)$')
+
+         for choice in completion.choices:
+             if not getattr(choice, "logprobs", None):
+                 continue
+
+             for logprob_item in choice.logprobs.content:
+                 if ignore_pattern.match(logprob_item.token):
+                     continue
+                 token_entry = {
+                     "token": logprob_item.token,
+                     "prob": round(math.exp(logprob_item.logprob), 8),
+                     "top_alternatives": [],
+                 }
+                 for alt in logprob_item.top_logprobs:
+                     if ignore_pattern.match(alt.token):
+                         continue
+                     token_entry["top_alternatives"].append(
+                         {
+                             "token": alt.token,
+                             "prob": round(math.exp(alt.logprob), 8),
+                         }
+                     )
+                 logprobs_data.append(token_entry)
+
+         return logprobs_data
+
+     async def run(
+         self,
+         input_text: str,
+         prompt_file: str,
+         output_model: T,
+         with_analysis: bool = False,
+         use_modes: bool = False,
+         mode: str = "",
+         resp_format: Literal["vllm", "parse"] = "parse",
+         output_lang: str | None = None,
+         logprobs: bool = False,
+         top_logprobs: int = 3,
+         max_tokens: int | None = None,
+         **extra_kwargs,
+     ) -> dict[str, Any]:
+         """
+         Execute the async LLM pipeline with the given input text.
+         """
+         prompt_loader = PromptLoader()
+         formatter = UserMergeFormatter()
+
+         try:
+             cleaned_text = input_text.strip()
+
+             # FIXED: Correct parameter order for load
+             prompt_configs = prompt_loader.load(
+                 prompt_file=prompt_file,  # prompt_file
+                 text=cleaned_text,  # text
+                 mode=mode if use_modes else "",  # mode
+                 **extra_kwargs,
+             )
+
+             messages: list[dict[str, str]] = []
+
+             if with_analysis:
+                 analysis = await self._analyze(prompt_configs)
+                 messages.append(
+                     self._build_user_message(f"Based on this analysis: {analysis}")
+                 )
+
+             if output_lang:
+                 messages.append(
+                     self._build_user_message(
+                         f"Respond only in the {output_lang} language."
+                     )
+                 )
+
+             messages.append(self._build_user_message(prompt_configs["main_template"]))
+             messages = formatter.format(messages)
+
+             if resp_format == "vllm":
+                 parsed, completion = await self._vllm_completion(
+                     messages,
+                     output_model,
+                     logprobs,
+                     top_logprobs,
+                     max_tokens,  # Pass max_tokens
+                 )
+             elif resp_format == "parse":
+                 parsed, completion = await self._parse_completion(
+                     messages,
+                     output_model,
+                     logprobs,
+                     top_logprobs,
+                     max_tokens,  # Pass max_tokens
+                 )
+             else:
+                 raise ValueError(f"Unknown resp_format: {resp_format}")
+
+             results = {"result": parsed.result}
+
+             if logprobs:
+                 results["logprobs"] = self._extract_logprobs(completion)
+
+             if with_analysis:
+                 results["analysis"] = analysis
+
+             return results
+
+         except Exception as e:
+             print(f"[ERROR] Async operation failed: {e}")
+             raise
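
For orientation, the sketch below shows one way the updated AsyncOperator.run could be called after this release. It is a minimal, hypothetical example and not part of the package: the base URL, API key, model name, and prompt_file value are placeholders, StrOutput is defined locally with only the `result` field that run() reads (the package's own output models live in texttools/tools/internals/output_models.py), and only the signature visible in the diff above, including the new max_tokens parameter, is assumed.

import asyncio

from openai import AsyncOpenAI
from pydantic import BaseModel

from texttools.tools.internals.async_operator import AsyncOperator


class StrOutput(BaseModel):
    # run() accesses `parsed.result`, so a `result` field is the one hard requirement.
    result: str


async def main() -> None:
    # Placeholder endpoint and model; any OpenAI-compatible server should work.
    client = AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
    operator = AsyncOperator(client, model="my-model", temperature=0.0)

    out = await operator.run(
        input_text="Summarize the release notes for version 1.0.5.",
        prompt_file="summarizer.yaml",  # assumed to resolve against texttools/prompts/
        output_model=StrOutput,
        resp_format="vllm",             # "vllm" uses guided_json; "parse" uses the beta parse API
        output_lang="English",
        logprobs=True,
        max_tokens=256,                 # new in 1.0.5: forwarded to the completion request
    )
    print(out["result"])
    print(out.get("logprobs", []))


asyncio.run(main())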