hamtaa-texttools 1.0.5__py3-none-any.whl → 1.0.7__py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registry.
Note: this release of hamtaa-texttools has been flagged as potentially problematic.
- {hamtaa_texttools-1.0.5.dist-info → hamtaa_texttools-1.0.7.dist-info}/METADATA +15 -15
- hamtaa_texttools-1.0.7.dist-info/RECORD +31 -0
- texttools/batch/batch_manager.py +7 -18
- texttools/batch/batch_runner.py +96 -45
- texttools/prompts/README.md +4 -0
- texttools/prompts/{keyword_extractor.yaml → extract_keywords.yaml} +6 -6
- texttools/prompts/{question_merger.yaml → merge_questions.yaml} +5 -5
- texttools/tools/async_the_tool.py +204 -143
- texttools/tools/internals/async_operator.py +98 -204
- texttools/tools/internals/base_operator.py +85 -0
- texttools/tools/internals/operator.py +27 -130
- texttools/tools/internals/prompt_loader.py +12 -22
- texttools/tools/the_tool.py +162 -225
- hamtaa_texttools-1.0.5.dist-info/RECORD +0 -30
- {hamtaa_texttools-1.0.5.dist-info → hamtaa_texttools-1.0.7.dist-info}/WHEEL +0 -0
- {hamtaa_texttools-1.0.5.dist-info → hamtaa_texttools-1.0.7.dist-info}/licenses/LICENSE +0 -0
- {hamtaa_texttools-1.0.5.dist-info → hamtaa_texttools-1.0.7.dist-info}/top_level.txt +0 -0
- /texttools/prompts/{ner_extractor.yaml → extract_entities.yaml} +0 -0
- /texttools/prompts/{question_detector.yaml → is_question.yaml} +0 -0
- /texttools/prompts/{rewriter.yaml → rewrite.yaml} +0 -0
- /texttools/prompts/{custom_tool.yaml → run_custom.yaml} +0 -0
- /texttools/prompts/{subject_question_generator.yaml → subject_to_question.yaml} +0 -0
- /texttools/prompts/{summarizer.yaml → summarize.yaml} +0 -0
- /texttools/prompts/{question_generator.yaml → text_to_question.yaml} +0 -0
- /texttools/prompts/{translator.yaml → translate.yaml} +0 -0
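The core change in this release shows up in the texttools/tools/the_tool.py diff below: TheTool no longer stores request defaults (model, user_prompt, output_lang, with_analysis, temperature, logprobs, top_logprobs) on the instance. The constructor now takes only the client and a required model name, and every option is passed per call. Several methods were also renamed to match the prompt-file renames above (detect_question → is_question, generate_question_from_text → text_to_question, generate_questions_from_subject → subject_to_question). A minimal before/after sketch, assuming TheTool is importable from the module path shown in this diff; the endpoint and model name are placeholders:

from openai import OpenAI
from texttools.tools.the_tool import TheTool

client = OpenAI(base_url="http://localhost:8000/v1", api_key="...")  # placeholder endpoint

# 1.0.5: request defaults were bound once, at construction time, e.g.:
# tool = TheTool(client, model="google/gemma-3n-e4b-it", with_analysis=True)
# result = tool.categorize("some text")

# 1.0.7: the constructor keeps only the client and a required model;
# with_analysis, output_lang, temperature, etc. move to each call.
tool = TheTool(client, model="model-name")
result = tool.categorize("some text", with_analysis=True)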
texttools/tools/the_tool.py
CHANGED
@@ -8,62 +8,30 @@ import texttools.tools.internals.output_models as OutputModels
 
 class TheTool:
     """
-    High-level interface exposing specialized text tools for.
-
     Each method configures the operator with a specific YAML prompt,
     output schema, and flags, then delegates execution to `operator.run()`.
 
-
-    - categorize: assign a text to one of several Islamic categories.
-    - extract_keywords: produce a keyword list from text.
-    - extract_entities: simple NER (name/type pairs).
-    - detect_question: binary check whether input is a question.
-    - generate_question_from_text: produce a new question from a text.
-    - merge_questions: combine multiple questions (default/reason modes).
-    - rewrite: rephrase questions (same meaning/different wording, or vice versa).
-    - generate_questions_from_subject: generate multiple questions given a subject.
-    - summarize: produce a concise summary of a subject.
-    - translate: translate text between languages.
-
-    Usage pattern:
+    Usage:
         client = OpenAI(...)
-        tool = TheTool(client, model="
-        result = tool.categorize("
+        tool = TheTool(client, model="model-name")
+        result = tool.categorize("text ...", with_analysis=True)
     """
 
     def __init__(
         self,
         client: OpenAI,
-
-        model: str = "google/gemma-3n-e4b-it",
-        user_prompt: str | None = None,
-        output_lang: str | None = None,
-        with_analysis: bool = False,
-        temperature: float = 0.0,
-        logprobs: bool = False,
-        top_logprobs: int = 3,
+        model: str,
     ):
-
-        self.operator = Operator(client=client)
-
-        # Initialize default values
-        self.model = model
-        self.user_prompt = user_prompt
-        self.output_lang = output_lang
-        self.with_analysis = with_analysis
-        self.temperature = temperature
-        self.logprobs = logprobs
-        self.top_logprobs = top_logprobs
+        self.operator = Operator(client=client, model=model)
 
     def categorize(
         self,
         text: str,
-
-        user_prompt: str | None = None,
+        with_analysis: bool = False,
         output_lang: str | None = None,
-
-        temperature: float | None =
-        logprobs: bool
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
         top_logprobs: int | None = None,
     ) -> dict[str, str]:
         """
@@ -79,32 +47,29 @@
         Example: {"result": "باورهای دینی"}
         """
         return self.operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
             # Internal parameters
             prompt_file="categorizer.yaml",
             output_model=OutputModels.CategorizerOutput,
             resp_format="parse",
-
-            text=text,
-            model=self.model if model is None else model,
-            user_prompt=self.user_prompt if user_prompt is None else user_prompt,
-            output_lang=self.output_lang if output_lang is None else output_lang,
-            with_analysis=self.with_analysis
-            if with_analysis is None
-            else with_analysis,
-            temperature=self.temperature if temperature is None else temperature,
-            logprobs=self.logprobs if logprobs is None else logprobs,
-            top_logprobs=self.top_logprobs if top_logprobs is None else top_logprobs,
+            mode=None,
         )
 
     def extract_keywords(
         self,
         text: str,
-
-        user_prompt: str | None = None,
+        with_analysis: bool = False,
         output_lang: str | None = None,
-
-        temperature: float | None =
-        logprobs: bool
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
         top_logprobs: int | None = None,
     ) -> dict[str, list[str]]:
         """
@@ -118,32 +83,29 @@
             {"result": [<keyword1>, <keyword2>, ...]}
         """
         return self.operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
             # Internal parameters
-            prompt_file="
+            prompt_file="extract_keywords.yaml",
             output_model=OutputModels.ListStrOutput,
             resp_format="parse",
-
-            text=text,
-            model=self.model if model is None else model,
-            user_prompt=self.user_prompt if user_prompt is None else user_prompt,
-            output_lang=self.output_lang if output_lang is None else output_lang,
-            with_analysis=self.with_analysis
-            if with_analysis is None
-            else with_analysis,
-            temperature=self.temperature if temperature is None else temperature,
-            logprobs=self.logprobs if logprobs is None else logprobs,
-            top_logprobs=self.top_logprobs if top_logprobs is None else top_logprobs,
+            mode=None,
         )
 
     def extract_entities(
         self,
         text: str,
-
-        user_prompt: str | None = None,
+        with_analysis: bool = False,
         output_lang: str | None = None,
-
-        temperature: float | None =
-        logprobs: bool
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
         top_logprobs: int | None = None,
     ) -> dict[str, list[dict[str, str]]]:
         """
@@ -157,31 +119,28 @@
             {"result": [{"text": <entity>, "type": <entity_type>}, ...]}
         """
         return self.operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
             # Internal parameters
-            prompt_file="
+            prompt_file="extract_entities.yaml",
             output_model=OutputModels.ListDictStrStrOutput,
             resp_format="parse",
-
-            text=text,
-            model=self.model if model is None else model,
-            user_prompt=self.user_prompt if user_prompt is None else user_prompt,
-            output_lang=self.output_lang if output_lang is None else output_lang,
-            with_analysis=self.with_analysis
-            if with_analysis is None
-            else with_analysis,
-            temperature=self.temperature if temperature is None else temperature,
-            logprobs=self.logprobs if logprobs is None else logprobs,
-            top_logprobs=self.top_logprobs if top_logprobs is None else top_logprobs,
+            mode=None,
         )
 
-    def
+    def is_question(
         self,
         text: str,
-
+        with_analysis: bool = False,
         user_prompt: str | None = None,
-
-
-        logprobs: bool | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
         top_logprobs: int | None = None,
     ) -> dict[str, bool]:
         """
@@ -195,32 +154,29 @@
             {"result": "true"} or {"result": "false"}
         """
         return self.operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
             # Internal parameters
-            prompt_file="
+            prompt_file="is_question.yaml",
             output_model=OutputModels.BoolOutput,
             resp_format="parse",
-
-
-            text=text,
-            model=self.model if model is None else model,
-            user_prompt=self.user_prompt if user_prompt is None else user_prompt,
-            with_analysis=self.with_analysis
-            if with_analysis is None
-            else with_analysis,
-            temperature=self.temperature if temperature is None else temperature,
-            logprobs=self.logprobs if logprobs is None else logprobs,
-            top_logprobs=self.top_logprobs if top_logprobs is None else top_logprobs,
+            mode=None,
+            output_lang=None,
        )
 
-    def
+    def text_to_question(
         self,
         text: str,
-
-        user_prompt: str | None = None,
+        with_analysis: bool = False,
         output_lang: str | None = None,
-
-        temperature: float | None =
-        logprobs: bool
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
         top_logprobs: int | None = None,
     ) -> dict[str, str]:
         """
@@ -234,34 +190,31 @@
             {"result": <generated_question>}
         """
         return self.operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
             # Internal parameters
-            prompt_file="
+            prompt_file="text_to_question.yaml",
             output_model=OutputModels.StrOutput,
             resp_format="parse",
-
-            text=text,
-            model=self.model if model is None else model,
-            user_prompt=self.user_prompt if user_prompt is None else user_prompt,
-            output_lang=self.output_lang if output_lang is None else output_lang,
-            with_analysis=self.with_analysis
-            if with_analysis is None
-            else with_analysis,
-            temperature=self.temperature if temperature is None else temperature,
-            logprobs=self.logprobs if logprobs is None else logprobs,
-            top_logprobs=self.top_logprobs if top_logprobs is None else top_logprobs,
+            mode=None,
         )
 
     def merge_questions(
         self,
-
-
-        model: str | None = None,
-        user_prompt: str | None = None,
+        text: list[str],
+        with_analysis: bool = False,
         output_lang: str | None = None,
-
-        temperature: float | None =
-        logprobs: bool
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
         top_logprobs: int | None = None,
+        mode: Literal["default", "reason"] = "default",
     ) -> dict[str, str]:
         """
         Merge multiple questions into a single unified question.
@@ -276,37 +229,33 @@
         Returns:
             {"result": <merged_question>}
         """
-        text = ", ".join(
+        text = ", ".join(text)
         return self.operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
             # Internal parameters
-            prompt_file="
+            prompt_file="merge_questions.yaml",
             output_model=OutputModels.StrOutput,
             resp_format="parse",
-            # User parameters
-            text=text,
             mode=mode,
-            model=self.model if model is None else model,
-            user_prompt=self.user_prompt if user_prompt is None else user_prompt,
-            output_lang=self.output_lang if output_lang is None else output_lang,
-            with_analysis=self.with_analysis
-            if with_analysis is None
-            else with_analysis,
-            temperature=self.temperature if temperature is None else temperature,
-            logprobs=self.logprobs if logprobs is None else logprobs,
-            top_logprobs=self.top_logprobs if top_logprobs is None else top_logprobs,
         )
 
     def rewrite(
         self,
         text: str,
-
-        model: str | None = None,
-        user_prompt: str | None = None,
+        with_analysis: bool = False,
         output_lang: str | None = None,
-
-        temperature: float | None =
-        logprobs: bool
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
         top_logprobs: int | None = None,
+        mode: Literal["positive", "negative", "hard_negative"] = "positive",
     ) -> dict[str, str]:
         """
         Rewrite a question with different wording or meaning.
@@ -322,34 +271,30 @@
             {"result": <rewritten_question>}
         """
         return self.operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
             # Internal parameters
-            prompt_file="
+            prompt_file="rewrite.yaml",
             output_model=OutputModels.StrOutput,
             resp_format="parse",
-            # User parameters
-            text=text,
             mode=mode,
-            model=self.model if model is None else model,
-            user_prompt=self.user_prompt if user_prompt is None else user_prompt,
-            output_lang=self.output_lang if output_lang is None else output_lang,
-            with_analysis=self.with_analysis
-            if with_analysis is None
-            else with_analysis,
-            temperature=self.temperature if temperature is None else temperature,
-            logprobs=self.logprobs if logprobs is None else logprobs,
-            top_logprobs=self.top_logprobs if top_logprobs is None else top_logprobs,
         )
 
-    def
+    def subject_to_question(
         self,
         text: str,
         number_of_questions: int,
-
-        user_prompt: str | None = None,
+        with_analysis: bool = False,
         output_lang: str | None = None,
-
-        temperature: float | None =
-        logprobs: bool
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
         top_logprobs: int | None = None,
     ) -> dict[str, list[str]]:
         """
@@ -365,33 +310,30 @@
             {"result": [<question1>, <question2>, ...]}
         """
         return self.operator.run(
-            # Internal parameters
-            prompt_file="subject_question_generator.yaml",
-            output_model=OutputModels.ReasonListStrOutput,
-            resp_format="parse",
             # User parameters
             text=text,
             number_of_questions=number_of_questions,
-
-
-
-
-
-
-
-
-
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
+            # Internal parameters
+            prompt_file="subject_to_question.yaml",
+            output_model=OutputModels.ReasonListStrOutput,
+            resp_format="parse",
+            mode=None,
         )
 
     def summarize(
         self,
         text: str,
-
-        user_prompt: str | None = None,
+        with_analysis: bool = False,
         output_lang: str | None = None,
-
-        temperature: float | None =
-        logprobs: bool
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
         top_logprobs: int | None = None,
     ) -> dict[str, str]:
         """
@@ -405,32 +347,30 @@
             {"result": <summary>}
         """
         return self.operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
             # Internal parameters
-            prompt_file="
+            prompt_file="summarize.yaml",
             output_model=OutputModels.StrOutput,
             resp_format="parse",
-
-            text=text,
-            model=self.model if model is None else model,
-            user_prompt=self.user_prompt if user_prompt is None else user_prompt,
-            output_lang=self.output_lang if output_lang is None else output_lang,
-            with_analysis=self.with_analysis
-            if with_analysis is None
-            else with_analysis,
-            temperature=self.temperature if temperature is None else temperature,
-            logprobs=self.logprobs if logprobs is None else logprobs,
-            top_logprobs=self.top_logprobs if top_logprobs is None else top_logprobs,
+            mode=None,
         )
 
     def translate(
         self,
         text: str,
         target_language: str,
-
+        with_analysis: bool = False,
+        output_lang: str | None = None,
         user_prompt: str | None = None,
-
-
-        logprobs: bool | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
         top_logprobs: int | None = None,
     ) -> dict[str, str]:
         """
@@ -445,32 +385,29 @@
             {"result": <translated_text>}
         """
         return self.operator.run(
-            # Internal parameters
-            prompt_file="translator.yaml",
-            output_model=OutputModels.StrOutput,
-            resp_format="parse",
-            output_lang=False,
             # User parameters
             text=text,
             target_language=target_language,
-
-
-
-
-
-
-
-
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
+            # Internal parameters
+            prompt_file="translate.yaml",
+            output_model=OutputModels.StrOutput,
+            resp_format="parse",
+            mode=None,
        )
 
-    def
+    def run_custom(
         self,
         prompt: str,
         output_model: Any,
-        model: str | None = None,
         output_lang: str | None = None,
         temperature: float | None = None,
-        logprobs:
+        logprobs: bool | None = None,
         top_logprobs: int | None = None,
     ) -> dict[str, Any]:
         """
@@ -484,18 +421,18 @@
             {"result": <Any>}
         """
         return self.operator.run(
-            # Internal parameters
-            prompt_file="custom_tool.yaml",
-            resp_format="parse",
-            user_prompt=False,
-            with_analysis=False,
             # User paramaeters
             text=prompt,
             output_model=output_model,
             output_model_str=output_model.model_json_schema(),
-
-
-
-
-
+            output_lang=output_lang,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
+            # Internal parameters
+            prompt_file="run_custom.yaml",
+            resp_format="parse",
+            user_prompt=None,
+            with_analysis=False,
+            mode=None,
         )
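Beyond the parameter shuffle, the hunks above show three caller-visible changes worth illustrating: merge_questions now accepts a list[str] (joined internally with ", ") plus a mode literal, rewrite gains a mode literal of its own, and run_custom forwards the JSON schema of a caller-supplied Pydantic model via output_model.model_json_schema(). A short sketch against the new signatures, reusing the tool instance from the earlier example; the Verdict model and the prompt strings are hypothetical:

from pydantic import BaseModel

merged = tool.merge_questions(
    ["Why is the sky blue?", "What makes the sky look blue?"],  # now a list[str]
    mode="reason",  # Literal["default", "reason"]
)

# mode: Literal["positive", "negative", "hard_negative"]
rewritten = tool.rewrite("Why is the sky blue?", mode="hard_negative")

class Verdict(BaseModel):  # hypothetical output model
    label: str
    score: float

# run_custom passes the prompt as text and Verdict's JSON schema to the operator.
verdict = tool.run_custom(prompt="Grade this answer ...", output_model=Verdict)
print(verdict["result"])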
hamtaa_texttools-1.0.5.dist-info/RECORD
DELETED
@@ -1,30 +0,0 @@
-hamtaa_texttools-1.0.5.dist-info/licenses/LICENSE,sha256=Hb2YOBKy2MJQLnyLrX37B4ZVuac8eaIcE71SvVIMOLg,1082
-texttools/__init__.py,sha256=v3tQCH_Cjj47fCpuhK6sKSVAqEjNkc-cZbY4OJa4IZw,202
-texttools/batch/__init__.py,sha256=q50JsQsmQGp_8RW0KNasYeYWVV0R4FUNZ-ujXwEJemY,143
-texttools/batch/batch_manager.py,sha256=aYnHy82b4FJmhi2TWjXtxg67dN6PZOu2gQSusx373vE,9328
-texttools/batch/batch_runner.py,sha256=OHxeFT0YhEuDwZVAE07PUEC_7cuICWnnim3SuyZsx4U,7814
-texttools/formatters/base_formatter.py,sha256=xxnbujAr01NqZ49Y61LVFpIbj6kTEmV6JiUH_qCsIFk,1180
-texttools/formatters/user_merge_formatter.py,sha256=U_d7npTkC9QDgtEFjAjIuDlPfxVj3S1RyziidKqjciw,1086
-texttools/prompts/README.md,sha256=z8XW7ovh0yT1WGhkSPzYKixEtiGuDzeof6MALXaxUUY,1365
-texttools/prompts/categorizer.yaml,sha256=GMqIIzQFhgnlpkgU1qi3FAD3mD4A2jiWD5TilQ2XnnE,1204
-texttools/prompts/custom_tool.yaml,sha256=38OkCoVITbuuS9c08UZSP1jZW4WjSmRIi8fR0RAiPu4,108
-texttools/prompts/keyword_extractor.yaml,sha256=R05Ac_qnP4sUvhOGCW3XpjlJFdz1KgU4CgVCOXflY8M,775
-texttools/prompts/ner_extractor.yaml,sha256=KiKjeDpHaeh3JVtZ6q1pa3k4DYucUIU9WnEcRTCA-SE,651
-texttools/prompts/question_detector.yaml,sha256=d0-vKRbXWkxvO64ikvxRjEmpAXGpCYIPGhgexvPPjws,471
-texttools/prompts/question_generator.yaml,sha256=UheKYpDn6iyKI8NxunHZtFpNyfCLZZe5cvkuXpurUJY,783
-texttools/prompts/question_merger.yaml,sha256=b72QAk9Gs8k1xb2lSDXx44u-3Ku5vIuWL_6han4UaO0,1797
-texttools/prompts/rewriter.yaml,sha256=LO7He_IA3MZKz8a-LxH9DHJpOjpYwaYN1pbjp1Y0tFo,5392
-texttools/prompts/subject_question_generator.yaml,sha256=C7x7rNNm6U_ZG9HOn6zuzYOtvJUZ2skuWbL1-aYdd3E,1147
-texttools/prompts/summarizer.yaml,sha256=o6rxGPfWtZd61Duvm8NVvCJqfq73b-wAuMSKR6UYUqY,459
-texttools/prompts/translator.yaml,sha256=mGT2uBCei6uucWqVbs4silk-UV060v3G0jnt0P6sr50,634
-texttools/tools/__init__.py,sha256=hG1I28Q7BJ1Dbs95x6QMKXdsAlC5Eh_tqC-EbAibwiU,114
-texttools/tools/async_the_tool.py,sha256=m5b8t1eAGDDN44nOf-h9l-8rLxg7a859nZ9QePVlRzI,8827
-texttools/tools/the_tool.py,sha256=oRDsg8ZqMcUiWX6WXdtw9C1XkgWdi93GcBXCqTcUCMo,19406
-texttools/tools/internals/async_operator.py,sha256=jvpeVffRYnm6inUB7jncbmiOAztayMPzsIi7UH1IrHs,9910
-texttools/tools/internals/operator.py,sha256=4lrV8UisJoSUKGI23iL8IjK_MWg-ev43ywAXxCob3nU,10033
-texttools/tools/internals/output_models.py,sha256=Rf2x-UuGlmQHrvYIqnD11YuzMH_mPuir62HoMJQa2uk,1528
-texttools/tools/internals/prompt_loader.py,sha256=8RFhZE3HOcnQdtndSP2qULD6YQbZ34EVsTbXR-Zr-NM,2510
-hamtaa_texttools-1.0.5.dist-info/METADATA,sha256=l2qrSuF_s2JHwcybcGdq4c2hhcE-D6eqNzPvJZPyHaE,7780
-hamtaa_texttools-1.0.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-hamtaa_texttools-1.0.5.dist-info/top_level.txt,sha256=5Mh0jIxxZ5rOXHGJ6Mp-JPKviywwN0MYuH0xk5bEWqE,10
-hamtaa_texttools-1.0.5.dist-info/RECORD,,