hamtaa-texttools 1.0.1__py3-none-any.whl → 1.1.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of hamtaa-texttools might be problematic.
- hamtaa_texttools-1.1.7.dist-info/METADATA +228 -0
- hamtaa_texttools-1.1.7.dist-info/RECORD +30 -0
- {hamtaa_texttools-1.0.1.dist-info → hamtaa_texttools-1.1.7.dist-info}/licenses/LICENSE +20 -20
- {hamtaa_texttools-1.0.1.dist-info → hamtaa_texttools-1.1.7.dist-info}/top_level.txt +0 -0
- texttools/__init__.py +4 -9
- texttools/batch/__init__.py +3 -0
- texttools/{utils/batch_manager → batch}/batch_manager.py +226 -240
- texttools/batch/batch_runner.py +254 -0
- texttools/prompts/README.md +35 -0
- texttools/prompts/categorizer.yaml +28 -0
- texttools/prompts/extract_entities.yaml +20 -0
- texttools/prompts/extract_keywords.yaml +18 -0
- texttools/prompts/is_question.yaml +14 -0
- texttools/prompts/merge_questions.yaml +46 -0
- texttools/prompts/rewrite.yaml +111 -0
- texttools/prompts/run_custom.yaml +7 -0
- texttools/prompts/subject_to_question.yaml +22 -0
- texttools/prompts/summarize.yaml +14 -0
- texttools/prompts/text_to_question.yaml +20 -0
- texttools/prompts/translate.yaml +15 -0
- texttools/tools/__init__.py +4 -3
- texttools/tools/async_the_tool.py +435 -0
- texttools/tools/internals/async_operator.py +242 -0
- texttools/tools/internals/base_operator.py +100 -0
- texttools/tools/internals/formatters.py +24 -0
- texttools/tools/internals/operator.py +242 -0
- texttools/tools/internals/output_models.py +62 -0
- texttools/tools/internals/prompt_loader.py +60 -0
- texttools/tools/the_tool.py +433 -291
- hamtaa_texttools-1.0.1.dist-info/METADATA +0 -129
- hamtaa_texttools-1.0.1.dist-info/RECORD +0 -18
- texttools/formatters/base_formatter.py +0 -33
- texttools/formatters/user_merge_formatter/user_merge_formatter.py +0 -47
- texttools/prompts/__init__.py +0 -0
- texttools/tools/operator.py +0 -236
- texttools/tools/output_models.py +0 -54
- texttools/tools/prompt_loader.py +0 -84
- texttools/utils/__init__.py +0 -4
- texttools/utils/batch_manager/__init__.py +0 -4
- texttools/utils/batch_manager/batch_runner.py +0 -212
- {hamtaa_texttools-1.0.1.dist-info → hamtaa_texttools-1.1.7.dist-info}/WHEEL +0 -0
texttools/tools/the_tool.py
CHANGED
@@ -1,291 +1,433 @@
-from typing import Literal, Any
-
-from openai import OpenAI
-
-from texttools.tools.operator import Operator
-import texttools.tools.output_models as OutputModels
-
-
-class TheTool:
-    """
-[… old lines 11-291 of the_tool.py were removed but are not rendered in this diff view …]
+from typing import Literal, Any, Callable
+
+from openai import OpenAI
+
+from texttools.tools.internals.operator import Operator
+import texttools.tools.internals.output_models as OutputModels
+
+
+class TheTool:
+    """
+    Each method configures the operator with a specific YAML prompt,
+    output schema, and flags, then delegates execution to `operator.run()`.
+
+    Usage:
+        client = OpenAI(...)
+        tool = TheTool(client, model="model-name")
+        result = tool.categorize("text ...", with_analysis=True)
+    """
+
+    def __init__(
+        self,
+        client: OpenAI,
+        model: str,
+    ):
+        self.operator = Operator(client=client, model=model)
+
+    def categorize(
+        self,
+        text: str,
+        with_analysis: bool = False,
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
+        top_logprobs: int | None = None,
+        validator: Callable[[Any], bool] | None = None,
+    ) -> OutputModels.ToolOutput:
+        """
+        Categorize a text into a single Islamic studies domain category.
+
+        Returns:
+            ToolOutput: Object containing:
+                - result (str): The assigned Islamic studies category
+                - logprobs (list | None): Probability data if logprobs enabled
+                - analysis (str | None): Detailed reasoning if with_analysis enabled
+        """
+        return self.operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
+            validator=validator,
+            # Internal parameters
+            prompt_file="categorizer.yaml",
+            output_model=OutputModels.CategorizerOutput,
+            resp_format="parse",
+            mode=None,
+            output_lang=None,
+        )
+
+    def extract_keywords(
+        self,
+        text: str,
+        with_analysis: bool = False,
+        output_lang: str | None = None,
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
+        top_logprobs: int | None = None,
+        validator: Callable[[Any], bool] | None = None,
+    ) -> OutputModels.ToolOutput:
+        """
+        Extract salient keywords from text.
+
+        Returns:
+            ToolOutput: Object containing:
+                - result (list[str]): List of extracted keywords
+                - logprobs (list | None): Probability data if logprobs enabled
+                - analysis (str | None): Detailed reasoning if with_analysis enabled
+        """
+        return self.operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
+            validator=validator,
+            # Internal parameters
+            prompt_file="extract_keywords.yaml",
+            output_model=OutputModels.ListStrOutput,
+            resp_format="parse",
+            mode=None,
+        )
+
+    def extract_entities(
+        self,
+        text: str,
+        with_analysis: bool = False,
+        output_lang: str | None = None,
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
+        top_logprobs: int | None = None,
+        validator: Callable[[Any], bool] | None = None,
+    ) -> OutputModels.ToolOutput:
+        """
+        Perform Named Entity Recognition (NER) over the input text.
+
+        Returns:
+            ToolOutput: Object containing:
+                - result (list[dict]): List of entities with 'text' and 'type' keys
+                - logprobs (list | None): Probability data if logprobs enabled
+                - analysis (str | None): Detailed reasoning if with_analysis enabled
+        """
+        return self.operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
+            validator=validator,
+            # Internal parameters
+            prompt_file="extract_entities.yaml",
+            output_model=OutputModels.ListDictStrStrOutput,
+            resp_format="parse",
+            mode=None,
+        )
+
+    def is_question(
+        self,
+        text: str,
+        with_analysis: bool = False,
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
+        top_logprobs: int | None = None,
+        validator: Callable[[Any], bool] | None = None,
+    ) -> OutputModels.ToolOutput:
+        """
+        Detect if the input is phrased as a question.
+
+        Returns:
+            ToolOutput: Object containing:
+                - result (bool): True if text is a question, False otherwise
+                - logprobs (list | None): Probability data if logprobs enabled
+                - analysis (str | None): Detailed reasoning if with_analysis enabled
+        """
+        return self.operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
+            validator=validator,
+            # Internal parameters
+            prompt_file="is_question.yaml",
+            output_model=OutputModels.BoolOutput,
+            resp_format="parse",
+            mode=None,
+            output_lang=None,
+        )
+
+    def text_to_question(
+        self,
+        text: str,
+        with_analysis: bool = False,
+        output_lang: str | None = None,
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
+        top_logprobs: int | None = None,
+        validator: Callable[[Any], bool] | None = None,
+    ) -> OutputModels.ToolOutput:
+        """
+        Generate a single question from the given text.
+
+        Returns:
+            ToolOutput: Object containing:
+                - result (str): The generated question
+                - logprobs (list | None): Probability data if logprobs enabled
+                - analysis (str | None): Detailed reasoning if with_analysis enabled
+        """
+        return self.operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
+            validator=validator,
+            # Internal parameters
+            prompt_file="text_to_question.yaml",
+            output_model=OutputModels.StrOutput,
+            resp_format="parse",
+            mode=None,
+        )
+
+    def merge_questions(
+        self,
+        text: list[str],
+        with_analysis: bool = False,
+        output_lang: str | None = None,
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
+        top_logprobs: int | None = None,
+        mode: Literal["default", "reason"] = "default",
+        validator: Callable[[Any], bool] | None = None,
+    ) -> OutputModels.ToolOutput:
+        """
+        Merge multiple questions into a single unified question.
+
+        Returns:
+            ToolOutput: Object containing:
+                - result (str): The merged question
+                - logprobs (list | None): Probability data if logprobs enabled
+                - analysis (str | None): Detailed reasoning if with_analysis enabled
+        """
+        text = ", ".join(text)
+        return self.operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
+            validator=validator,
+            # Internal parameters
+            prompt_file="merge_questions.yaml",
+            output_model=OutputModels.StrOutput,
+            resp_format="parse",
+            mode=mode,
+        )
+
+    def rewrite(
+        self,
+        text: str,
+        with_analysis: bool = False,
+        output_lang: str | None = None,
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
+        top_logprobs: int | None = None,
+        mode: Literal["positive", "negative", "hard_negative"] = "positive",
+        validator: Callable[[Any], bool] | None = None,
+    ) -> OutputModels.ToolOutput:
+        """
+        Rewrite a text with different modes.
+
+        Returns:
+            ToolOutput: Object containing:
+                - result (str): The rewritten text
+                - logprobs (list | None): Probability data if logprobs enabled
+                - analysis (str | None): Detailed reasoning if with_analysis enabled
+        """
+        return self.operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
+            validator=validator,
+            # Internal parameters
+            prompt_file="rewrite.yaml",
+            output_model=OutputModels.StrOutput,
+            resp_format="parse",
+            mode=mode,
+        )
+
+    def subject_to_question(
+        self,
+        text: str,
+        number_of_questions: int,
+        with_analysis: bool = False,
+        output_lang: str | None = None,
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
+        top_logprobs: int | None = None,
+        validator: Callable[[Any], bool] | None = None,
+    ) -> OutputModels.ToolOutput:
+        """
+        Generate a list of questions about a subject.
+
+        Returns:
+            ToolOutput: Object containing:
+                - result (list[str]): List of generated questions
+                - logprobs (list | None): Probability data if logprobs enabled
+                - analysis (str | None): Detailed reasoning if with_analysis enabled
+        """
+        return self.operator.run(
+            # User parameters
+            text=text,
+            number_of_questions=number_of_questions,
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
+            validator=validator,
+            # Internal parameters
+            prompt_file="subject_to_question.yaml",
+            output_model=OutputModels.ReasonListStrOutput,
+            resp_format="parse",
+            mode=None,
+        )
+
+    def summarize(
+        self,
+        text: str,
+        with_analysis: bool = False,
+        output_lang: str | None = None,
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
+        top_logprobs: int | None = None,
+        validator: Callable[[Any], bool] | None = None,
+    ) -> OutputModels.ToolOutput:
+        """
+        Summarize the given subject text.
+
+        Returns:
+            ToolOutput: Object containing:
+                - result (str): The summary text
+                - logprobs (list | None): Probability data if logprobs enabled
+                - analysis (str | None): Detailed reasoning if with_analysis enabled
+        """
+        return self.operator.run(
+            # User parameters
+            text=text,
+            with_analysis=with_analysis,
+            output_lang=output_lang,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
+            validator=validator,
+            # Internal parameters
+            prompt_file="summarize.yaml",
+            output_model=OutputModels.StrOutput,
+            resp_format="parse",
+            mode=None,
+        )
+
+    def translate(
+        self,
+        text: str,
+        target_language: str,
+        with_analysis: bool = False,
+        user_prompt: str | None = None,
+        temperature: float | None = 0.0,
+        logprobs: bool = False,
+        top_logprobs: int | None = None,
+        validator: Callable[[Any], bool] | None = None,
+    ) -> OutputModels.ToolOutput:
+        """
+        Translate text between languages.
+
+        Returns:
+            ToolOutput: Object containing:
+                - result (str): The translated text
+                - logprobs (list | None): Probability data if logprobs enabled
+                - analysis (str | None): Detailed reasoning if with_analysis enabled
+        """
+        return self.operator.run(
+            # User parameters
+            text=text,
+            target_language=target_language,
+            with_analysis=with_analysis,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            logprobs=logprobs,
+            top_logprobs=top_logprobs,
+            validator=validator,
+            # Internal parameters
+            prompt_file="translate.yaml",
+            output_model=OutputModels.StrOutput,
+            resp_format="parse",
+            mode=None,
+            output_lang=None,
+        )
+
|
|
401
|
+
def run_custom(
|
|
402
|
+
self,
|
|
403
|
+
prompt: str,
|
|
404
|
+
output_model: Any,
|
|
405
|
+
output_lang: str | None = None,
|
|
406
|
+
temperature: float | None = None,
|
|
407
|
+
logprobs: bool | None = None,
|
|
408
|
+
top_logprobs: int | None = None,
|
|
409
|
+
) -> OutputModels.ToolOutput:
|
|
410
|
+
"""
|
|
411
|
+
Custom tool that can do almost anything!
|
|
412
|
+
|
|
413
|
+
Returns:
|
|
414
|
+
ToolOutput: Object with fields:
|
|
415
|
+
- result (str): The output result
|
|
416
|
+
"""
|
|
417
|
+
return self.operator.run(
|
|
418
|
+
# User paramaeters
|
|
419
|
+
text=prompt,
|
|
420
|
+
output_model=output_model,
|
|
421
|
+
output_model_str=output_model.model_json_schema(),
|
|
422
|
+
output_lang=output_lang,
|
|
423
|
+
temperature=temperature,
|
|
424
|
+
logprobs=logprobs,
|
|
425
|
+
top_logprobs=top_logprobs,
|
|
426
|
+
# Internal parameters
|
|
427
|
+
prompt_file="run_custom.yaml",
|
|
428
|
+
resp_format="parse",
|
|
429
|
+
user_prompt=None,
|
|
430
|
+
with_analysis=False,
|
|
431
|
+
mode=None,
|
|
432
|
+
validator=None,
|
|
433
|
+
)
|
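For orientation, a minimal usage sketch of the TheTool API added in 1.1.7, based on the class docstring in the diff above; the API key and model name are placeholders, not values shipped with the package:

    from openai import OpenAI
    from texttools.tools.the_tool import TheTool

    # Placeholder client settings; any OpenAI-compatible endpoint and model should work here.
    client = OpenAI(api_key="sk-placeholder")
    tool = TheTool(client, model="gpt-4o-mini")  # hypothetical model name

    # Each method returns a ToolOutput with .result, plus .analysis / .logprobs when requested.
    category = tool.categorize("Some input text ...", with_analysis=True)
    print(category.result, category.analysis)

    keywords = tool.extract_keywords("Some input text ...", output_lang="en")
    print(keywords.result)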