hamtaa-texttools 1.1.8-py3-none-any.whl → 1.1.10-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.
texttools/tools/internals/operator.py

@@ -1,4 +1,4 @@
-from typing import Any, TypeVar, Type, Literal, Callable
+from typing import Any, TypeVar, Type, Callable
 import logging
 
 from openai import OpenAI
@@ -6,7 +6,6 @@ from pydantic import BaseModel
 
 from texttools.tools.internals.output_models import ToolOutput
 from texttools.tools.internals.base_operator import BaseOperator
-from texttools.tools.internals.formatters import Formatter
 from texttools.tools.internals.prompt_loader import PromptLoader
 
 # Base Model type for output models
@@ -26,8 +25,7 @@ class Operator(BaseOperator):
     """
 
     def __init__(self, client: OpenAI, model: str):
-        self.client = client
-        self.model = model
+        super().__init__(client, model)
 
     def _analyze(self, prompt_configs: dict[str, str], temperature: float) -> str:
         """
@@ -36,8 +34,8 @@ class Operator(BaseOperator):
         """
         analyze_prompt = prompt_configs["analyze_template"]
         analyze_message = [self._build_user_message(analyze_prompt)]
-        completion = self.client.chat.completions.create(
-            model=self.model,
+        completion = self._client.chat.completions.create(
+            model=self._model,
             messages=analyze_message,
             temperature=temperature,
         )
@@ -51,13 +49,13 @@ class Operator(BaseOperator):
         temperature: float,
         logprobs: bool = False,
         top_logprobs: int = 3,
-    ) -> tuple[Type[T], Any]:
+    ) -> tuple[T, Any]:
         """
         Parses a chat completion using OpenAI's structured output format.
         Returns both the parsed object and the raw completion for logging.
         """
         request_kwargs = {
-            "model": self.model,
+            "model": self._model,
            "messages": message,
            "response_format": output_model,
            "temperature": temperature,
@@ -67,43 +65,10 @@ class Operator(BaseOperator):
             request_kwargs["logprobs"] = True
             request_kwargs["top_logprobs"] = top_logprobs
 
-        completion = self.client.beta.chat.completions.parse(**request_kwargs)
+        completion = self._client.beta.chat.completions.parse(**request_kwargs)
         parsed = completion.choices[0].message.parsed
         return parsed, completion
 
-    def _vllm_completion(
-        self,
-        message: list[dict[str, str]],
-        output_model: Type[T],
-        temperature: float,
-        logprobs: bool = False,
-        top_logprobs: int = 3,
-    ) -> tuple[Type[T], Any]:
-        """
-        Generates a completion using vLLM with JSON schema guidance.
-        Returns the parsed output model and raw completion.
-        """
-        json_schema = output_model.model_json_schema()
-
-        # Build kwargs dynamically
-        request_kwargs = {
-            "model": self.model,
-            "messages": message,
-            "extra_body": {"guided_json": json_schema},
-            "temperature": temperature,
-        }
-
-        if logprobs:
-            request_kwargs["logprobs"] = True
-            request_kwargs["top_logprobs"] = top_logprobs
-
-        completion = self.client.chat.completions.create(**request_kwargs)
-        response = completion.choices[0].message.content
-
-        # Convert the string response to output model
-        parsed = self._convert_to_output_model(response, output_model)
-        return parsed, completion
-
     def run(
         self,
         # User parameters
@@ -118,7 +83,6 @@
         # Internal parameters
         prompt_file: str,
         output_model: Type[T],
-        resp_format: Literal["vllm", "parse"],
         mode: str | None,
         **extra_kwargs,
     ) -> ToolOutput:
@@ -126,7 +90,6 @@
         Execute the LLM pipeline with the given input text.
         """
         prompt_loader = PromptLoader()
-        formatter = Formatter()
        output = ToolOutput()
 
        try:
@@ -138,7 +101,7 @@
                **extra_kwargs,
            )
 
-            messages: list[dict[str, str]] = []
+            messages = []
 
            if with_analysis:
                analysis = self._analyze(prompt_configs, temperature)
@@ -159,16 +122,11 @@
            )
 
            messages.append(self._build_user_message(prompt_configs["main_template"]))
-            messages = formatter.user_merge_format(messages)
+            messages
 
-            if resp_format == "vllm":
-                parsed, completion = self._vllm_completion(
-                    messages, output_model, temperature, logprobs, top_logprobs
-                )
-            elif resp_format == "parse":
-                parsed, completion = self._parse_completion(
-                    messages, output_model, temperature, logprobs, top_logprobs
-                )
+            parsed, completion = self._parse_completion(
+                messages, output_model, temperature, logprobs, top_logprobs
+            )
 
            # Ensure output_model has a `result` field
            if not hasattr(parsed, "result"):
@@ -181,8 +139,7 @@
 
            # Retry logic if validation fails
            if validator and not validator(output.result):
-                max_retries = 3
-                for attempt in range(max_retries):
+                for attempt in range(self.MAX_RETRIES):
                    logger.warning(
                        f"Validation failed, retrying for the {attempt + 1} time."
                    )
@@ -190,22 +147,13 @@
                    # Generate new temperature for retry
                    retry_temperature = self._get_retry_temp(temperature)
                    try:
-                        if resp_format == "vllm":
-                            parsed, completion = self._vllm_completion(
-                                messages,
-                                output_model,
-                                retry_temperature,
-                                logprobs,
-                                top_logprobs,
-                            )
-                        elif resp_format == "parse":
-                            parsed, completion = self._parse_completion(
-                                messages,
-                                output_model,
-                                retry_temperature,
-                                logprobs,
-                                top_logprobs,
-                            )
+                        parsed, completion = self._parse_completion(
+                            messages,
+                            output_model,
+                            retry_temperature,
+                            logprobs,
+                            top_logprobs,
+                        )
 
                        output.result = parsed.result
 
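The upshot of this file's changes: the `_vllm_completion` path and the `resp_format` switch are gone, every request now flows through OpenAI's structured-output parsing, and the client and model are inherited from `BaseOperator` as `_client`/`_model`. For readers unfamiliar with the surviving code path, here is a minimal self-contained sketch of `beta.chat.completions.parse` with a Pydantic response model; the model name and prompt are illustrative, not taken from the package:

    from openai import OpenAI
    from pydantic import BaseModel

    class StrOutput(BaseModel):
        # mirrors the package's output models, which expose a `result` field
        result: str

    client = OpenAI()
    completion = client.beta.chat.completions.parse(
        model="gpt-4o-mini",  # illustrative model name
        messages=[{"role": "user", "content": "Summarize: ..."}],
        response_format=StrOutput,  # the SDK derives a JSON schema from the model
        temperature=0.0,
    )
    parsed = completion.choices[0].message.parsed  # a StrOutput instance (None on refusal)
    print(parsed.result)

This is why `run` can check `hasattr(parsed, "result")`: the parsed object is an instance of the supplied `output_model`.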
texttools/tools/internals/prompt_loader.py

@@ -11,15 +11,18 @@ class PromptLoader:
     - Load and parse YAML prompt definitions.
     - Select the right template (by mode, if applicable).
     - Inject variables (`{input}`, plus any extra kwargs) into the templates.
-    - Return a dict with:
-      {
-          "main_template": "...",
-          "analyze_template": "..." | None
-      }
     """
 
-    MAIN_TEMPLATE: str = "main_template"
-    ANALYZE_TEMPLATE: str = "analyze_template"
+    MAIN_TEMPLATE = "main_template"
+    ANALYZE_TEMPLATE = "analyze_template"
+
+    @staticmethod
+    def _build_format_args(text: str, **extra_kwargs) -> dict[str, str]:
+        # Base formatting args
+        format_args = {"input": text}
+        # Merge extras
+        format_args.update(extra_kwargs)
+        return format_args
 
     # Use lru_cache to load each file once
     @lru_cache(maxsize=32)
@@ -40,13 +43,6 @@ class PromptLoader:
            else data.get(self.ANALYZE_TEMPLATE),
        }
 
-    def _build_format_args(self, text: str, **extra_kwargs) -> dict[str, str]:
-        # Base formatting args
-        format_args = {"input": text}
-        # Merge extras
-        format_args.update(extra_kwargs)
-        return format_args
-
     def load(
        self, prompt_file: str, text: str, mode: str, **extra_kwargs
     ) -> dict[str, str]:
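Two mechanical refactors here: `_build_format_args` becomes a `@staticmethod` (it never used `self`), and the template-key constants drop their redundant annotations. One behavioral detail worth knowing about the cached loader that the hunk's context mentions: `@lru_cache` on a method keys the cache on all arguments, including `self`. A small sketch of that pattern under that assumption (names are hypothetical; the loader's full signature is not shown in this diff):

    from functools import lru_cache
    import yaml  # PyYAML, implied by the package's YAML prompt files

    class PromptLoader:
        @lru_cache(maxsize=32)
        def _load_file(self, path: str) -> dict:
            # cached per (self, path): the same instance never re-reads a file,
            # but a brand-new PromptLoader() starts with a cold cache
            with open(path, encoding="utf-8") as f:
                return yaml.safe_load(f)

Since `Operator.run` constructs a fresh `PromptLoader()` on every call, the cache pays off within a single call; it would persist across calls only if the loader instance were shared.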
texttools/tools/sync_tools.py

@@ -3,7 +3,7 @@ from typing import Literal, Any, Callable
 from openai import OpenAI
 
 from texttools.tools.internals.operator import Operator
-import texttools.tools.internals.output_models as OutputModels
+import texttools.tools.internals.output_models as OM
 
 
 class TheTool:
@@ -22,7 +22,7 @@ class TheTool:
        client: OpenAI,
        model: str,
    ):
-        self.operator = Operator(client=client, model=model)
+        self._operator = Operator(client=client, model=model)
 
    def categorize(
        self,
@@ -33,7 +33,7 @@
        logprobs: bool = False,
        top_logprobs: int | None = None,
        validator: Callable[[Any], bool] | None = None,
-    ) -> OutputModels.ToolOutput:
+    ) -> OM.ToolOutput:
        """
        Categorize a text into a single Islamic studies domain category.
 
@@ -43,7 +43,7 @@
        - logprobs (list | None): Probability data if logprobs enabled
        - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
-        return self.operator.run(
+        return self._operator.run(
            # User parameters
            text=text,
            with_analysis=with_analysis,
@@ -54,8 +54,7 @@
            validator=validator,
            # Internal parameters
            prompt_file="categorizer.yaml",
-            output_model=OutputModels.CategorizerOutput,
-            resp_format="parse",
+            output_model=OM.CategorizerOutput,
            mode=None,
            output_lang=None,
        )
@@ -70,7 +69,7 @@
        logprobs: bool = False,
        top_logprobs: int | None = None,
        validator: Callable[[Any], bool] | None = None,
-    ) -> OutputModels.ToolOutput:
+    ) -> OM.ToolOutput:
        """
        Extract salient keywords from text.
 
@@ -80,7 +79,7 @@
        - logprobs (list | None): Probability data if logprobs enabled
        - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
-        return self.operator.run(
+        return self._operator.run(
            # User parameters
            text=text,
            with_analysis=with_analysis,
@@ -92,8 +91,7 @@
            validator=validator,
            # Internal parameters
            prompt_file="extract_keywords.yaml",
-            output_model=OutputModels.ListStrOutput,
-            resp_format="parse",
+            output_model=OM.ListStrOutput,
            mode=None,
        )
 
@@ -107,7 +105,7 @@
        logprobs: bool = False,
        top_logprobs: int | None = None,
        validator: Callable[[Any], bool] | None = None,
-    ) -> OutputModels.ToolOutput:
+    ) -> OM.ToolOutput:
        """
        Perform Named Entity Recognition (NER) over the input text.
 
@@ -117,7 +115,7 @@
        - logprobs (list | None): Probability data if logprobs enabled
        - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
-        return self.operator.run(
+        return self._operator.run(
            # User parameters
            text=text,
            with_analysis=with_analysis,
@@ -129,8 +127,7 @@
            validator=validator,
            # Internal parameters
            prompt_file="extract_entities.yaml",
-            output_model=OutputModels.ListDictStrStrOutput,
-            resp_format="parse",
+            output_model=OM.ListDictStrStrOutput,
            mode=None,
        )
 
@@ -143,7 +140,7 @@
        logprobs: bool = False,
        top_logprobs: int | None = None,
        validator: Callable[[Any], bool] | None = None,
-    ) -> OutputModels.ToolOutput:
+    ) -> OM.ToolOutput:
        """
        Detect if the input is phrased as a question.
 
@@ -153,7 +150,7 @@
        - logprobs (list | None): Probability data if logprobs enabled
        - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
-        return self.operator.run(
+        return self._operator.run(
            # User parameters
            text=text,
            with_analysis=with_analysis,
@@ -164,8 +161,7 @@
            validator=validator,
            # Internal parameters
            prompt_file="is_question.yaml",
-            output_model=OutputModels.BoolOutput,
-            resp_format="parse",
+            output_model=OM.BoolOutput,
            mode=None,
            output_lang=None,
        )
@@ -180,7 +176,7 @@
        logprobs: bool = False,
        top_logprobs: int | None = None,
        validator: Callable[[Any], bool] | None = None,
-    ) -> OutputModels.ToolOutput:
+    ) -> OM.ToolOutput:
        """
        Generate a single question from the given text.
 
@@ -190,7 +186,7 @@
        - logprobs (list | None): Probability data if logprobs enabled
        - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
-        return self.operator.run(
+        return self._operator.run(
            # User parameters
            text=text,
            with_analysis=with_analysis,
@@ -202,8 +198,7 @@
            validator=validator,
            # Internal parameters
            prompt_file="text_to_question.yaml",
-            output_model=OutputModels.StrOutput,
-            resp_format="parse",
+            output_model=OM.StrOutput,
            mode=None,
        )
 
@@ -218,7 +213,7 @@
        top_logprobs: int | None = None,
        mode: Literal["default", "reason"] = "default",
        validator: Callable[[Any], bool] | None = None,
-    ) -> OutputModels.ToolOutput:
+    ) -> OM.ToolOutput:
        """
        Merge multiple questions into a single unified question.
 
@@ -229,7 +224,7 @@
        - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
        text = ", ".join(text)
-        return self.operator.run(
+        return self._operator.run(
            # User parameters
            text=text,
            with_analysis=with_analysis,
@@ -241,8 +236,7 @@
            validator=validator,
            # Internal parameters
            prompt_file="merge_questions.yaml",
-            output_model=OutputModels.StrOutput,
-            resp_format="parse",
+            output_model=OM.StrOutput,
            mode=mode,
        )
 
@@ -257,7 +251,7 @@
        top_logprobs: int | None = None,
        mode: Literal["positive", "negative", "hard_negative"] = "positive",
        validator: Callable[[Any], bool] | None = None,
-    ) -> OutputModels.ToolOutput:
+    ) -> OM.ToolOutput:
        """
        Rewrite a text with different modes.
 
@@ -267,7 +261,7 @@
        - logprobs (list | None): Probability data if logprobs enabled
        - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
-        return self.operator.run(
+        return self._operator.run(
            # User parameters
            text=text,
            with_analysis=with_analysis,
@@ -279,8 +273,7 @@
            validator=validator,
            # Internal parameters
            prompt_file="rewrite.yaml",
-            output_model=OutputModels.StrOutput,
-            resp_format="parse",
+            output_model=OM.StrOutput,
            mode=mode,
        )
 
@@ -295,7 +288,7 @@
        logprobs: bool = False,
        top_logprobs: int | None = None,
        validator: Callable[[Any], bool] | None = None,
-    ) -> OutputModels.ToolOutput:
+    ) -> OM.ToolOutput:
        """
        Generate a list of questions about a subject.
 
@@ -305,7 +298,7 @@
        - logprobs (list | None): Probability data if logprobs enabled
        - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
-        return self.operator.run(
+        return self._operator.run(
            # User parameters
            text=text,
            number_of_questions=number_of_questions,
@@ -318,8 +311,7 @@
            validator=validator,
            # Internal parameters
            prompt_file="subject_to_question.yaml",
-            output_model=OutputModels.ReasonListStrOutput,
-            resp_format="parse",
+            output_model=OM.ReasonListStrOutput,
            mode=None,
        )
 
@@ -333,7 +325,7 @@
        logprobs: bool = False,
        top_logprobs: int | None = None,
        validator: Callable[[Any], bool] | None = None,
-    ) -> OutputModels.ToolOutput:
+    ) -> OM.ToolOutput:
        """
        Summarize the given subject text.
 
@@ -343,7 +335,7 @@
        - logprobs (list | None): Probability data if logprobs enabled
        - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
-        return self.operator.run(
+        return self._operator.run(
            # User parameters
            text=text,
            with_analysis=with_analysis,
@@ -355,8 +347,7 @@
            validator=validator,
            # Internal parameters
            prompt_file="summarize.yaml",
-            output_model=OutputModels.StrOutput,
-            resp_format="parse",
+            output_model=OM.StrOutput,
            mode=None,
        )
 
@@ -370,7 +361,7 @@
        logprobs: bool = False,
        top_logprobs: int | None = None,
        validator: Callable[[Any], bool] | None = None,
-    ) -> OutputModels.ToolOutput:
+    ) -> OM.ToolOutput:
        """
        Translate text between languages.
 
@@ -380,7 +371,7 @@
        - logprobs (list | None): Probability data if logprobs enabled
        - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
-        return self.operator.run(
+        return self._operator.run(
            # User parameters
            text=text,
            target_language=target_language,
@@ -392,8 +383,7 @@
            validator=validator,
            # Internal parameters
            prompt_file="translate.yaml",
-            output_model=OutputModels.StrOutput,
-            resp_format="parse",
+            output_model=OM.StrOutput,
            mode=None,
            output_lang=None,
        )
@@ -406,7 +396,7 @@
        temperature: float | None = None,
        logprobs: bool | None = None,
        top_logprobs: int | None = None,
-    ) -> OutputModels.ToolOutput:
+    ) -> OM.ToolOutput:
        """
        Custom tool that can do almost anything!
 
@@ -414,7 +404,7 @@
        ToolOutput: Object with fields:
        - result (str): The output result
        """
-        return self.operator.run(
+        return self._operator.run(
            # User paramaeters
            text=prompt,
            output_model=output_model,
@@ -425,7 +415,6 @@
            top_logprobs=top_logprobs,
            # Internal parameters
            prompt_file="run_custom.yaml",
-            resp_format="parse",
            user_prompt=None,
            with_analysis=False,
            mode=None,
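Apart from the `OutputModels` → `OM` alias and the dropped `resp_format="parse"` argument (structured-output parsing is now the only path, so the flag was redundant), the visible change here is `self.operator` becoming the private `self._operator`. The public methods are untouched, so only callers that reached into `tool.operator` directly would break. A usage sketch, with an illustrative model name and import path:

    from openai import OpenAI
    from texttools.tools.sync_tools import TheTool

    client = OpenAI()  # or OpenAI(base_url=..., api_key=...) for a compatible endpoint
    tool = TheTool(client=client, model="gpt-4o-mini")  # illustrative model name

    out = tool.is_question("Is this sentence phrased as a question?")
    print(out.result)    # boolean answer
    print(out.analysis)  # None unless with_analysis=True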
hamtaa_texttools-1.1.8.dist-info/RECORD

@@ -1,30 +0,0 @@
-hamtaa_texttools-1.1.8.dist-info/licenses/LICENSE,sha256=Hb2YOBKy2MJQLnyLrX37B4ZVuac8eaIcE71SvVIMOLg,1082
-texttools/__init__.py,sha256=lFYe1jdssHC1h8qcPpV3whANxiDi8aiiFdY-7L0Ck10,164
-texttools/batch/__init__.py,sha256=DJGJTfR6F3Yv4_alsj9g1tesGzdcSV27Zw74DonhW_s,102
-texttools/batch/batch_manager.py,sha256=ZgLiO9maCHnx2cJbUjsYXFnlUsMLI2TP3Vc9uKU0BLg,8706
-texttools/batch/batch_runner.py,sha256=X0YQmaowO_jUSAFWBHdxOLoRrX_gvmrJDgp9qPlOSEw,10254
-texttools/prompts/README.md,sha256=-5YO93CN93QLifqZpUeUnCOCBbDiOTV-cFQeJ7Gg0I4,1377
-texttools/prompts/categorizer.yaml,sha256=GMqIIzQFhgnlpkgU1qi3FAD3mD4A2jiWD5TilQ2XnnE,1204
-texttools/prompts/extract_entities.yaml,sha256=KiKjeDpHaeh3JVtZ6q1pa3k4DYucUIU9WnEcRTCA-SE,651
-texttools/prompts/extract_keywords.yaml,sha256=0O7ypL_OsEOxtvlQ2CZjnsv9637DJwAKprZsf9Vo2_s,769
-texttools/prompts/is_question.yaml,sha256=d0-vKRbXWkxvO64ikvxRjEmpAXGpCYIPGhgexvPPjws,471
-texttools/prompts/merge_questions.yaml,sha256=0J85GvTirZB4ELwH3sk8ub_WcqqpYf6PrMKr3djlZeo,1792
-texttools/prompts/rewrite.yaml,sha256=LO7He_IA3MZKz8a-LxH9DHJpOjpYwaYN1pbjp1Y0tFo,5392
-texttools/prompts/run_custom.yaml,sha256=38OkCoVITbuuS9c08UZSP1jZW4WjSmRIi8fR0RAiPu4,108
-texttools/prompts/subject_to_question.yaml,sha256=C7x7rNNm6U_ZG9HOn6zuzYOtvJUZ2skuWbL1-aYdd3E,1147
-texttools/prompts/summarize.yaml,sha256=o6rxGPfWtZd61Duvm8NVvCJqfq73b-wAuMSKR6UYUqY,459
-texttools/prompts/text_to_question.yaml,sha256=UheKYpDn6iyKI8NxunHZtFpNyfCLZZe5cvkuXpurUJY,783
-texttools/prompts/translate.yaml,sha256=mGT2uBCei6uucWqVbs4silk-UV060v3G0jnt0P6sr50,634
-texttools/tools/__init__.py,sha256=3fPoeB-E5wGxWgv7axztHkeolR7ZDUJudd0xmpPFjao,113
-texttools/tools/async_tools.py,sha256=2ZY7Lo6Jj9xoTF8bfdh_g8VOXZ7ljMMesd1_QHXyf4s,15395
-texttools/tools/sync_tools.py,sha256=XKgZuzriFnk8B-YihJfs6BKivxjGCgOFfe7hnCpEiXs,15161
-texttools/tools/internals/async_operator.py,sha256=fCi70LXasC_2G9iz8uVFptnZEvVeb9TXopMBLi-cFuE,9022
-texttools/tools/internals/base_operator.py,sha256=rV2WqGdiHK4ezYz1f1EWcdbKFSFJhBJpORnJzPICFvk,3471
-texttools/tools/internals/formatters.py,sha256=tACNLP6PeoqaRpNudVxBaHA25zyWqWYPZQuYysIu88g,941
-texttools/tools/internals/operator.py,sha256=UBDScStTUXf8CIhwXb-6e_YOWTLggoiBV71vXRzr0P0,8904
-texttools/tools/internals/output_models.py,sha256=ekpbyocmXj_dee7ieOT1zOkMo9cPHT7xcUFCZoUaXA0,1886
-texttools/tools/internals/prompt_loader.py,sha256=1khayXcRC5w0Vf2SufpNaN1IUIhbKzS5ATiKheoBcGE,2082
-hamtaa_texttools-1.1.8.dist-info/METADATA,sha256=Cfb4VkcUELzRN6TrKdWK5jr4YsGbh_VlAtYVny86cb4,8690
-hamtaa_texttools-1.1.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-hamtaa_texttools-1.1.8.dist-info/top_level.txt,sha256=5Mh0jIxxZ5rOXHGJ6Mp-JPKviywwN0MYuH0xk5bEWqE,10
-hamtaa_texttools-1.1.8.dist-info/RECORD,,
texttools/batch/__init__.py

@@ -1,3 +0,0 @@
-from .batch_runner import BatchJobRunner, BatchConfig
-
-__all__ = ["BatchJobRunner", "BatchConfig"]
texttools/tools/__init__.py

@@ -1,4 +0,0 @@
-from .sync_tools import TheTool
-from .async_tools import AsyncTheTool
-
-__all__ = ["TheTool", "AsyncTheTool"]