hamtaa-texttools 1.0.1__py3-none-any.whl → 1.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of hamtaa-texttools might be problematic.

Files changed (41)
  1. hamtaa_texttools-1.1.7.dist-info/METADATA +228 -0
  2. hamtaa_texttools-1.1.7.dist-info/RECORD +30 -0
  3. {hamtaa_texttools-1.0.1.dist-info → hamtaa_texttools-1.1.7.dist-info}/licenses/LICENSE +20 -20
  4. {hamtaa_texttools-1.0.1.dist-info → hamtaa_texttools-1.1.7.dist-info}/top_level.txt +0 -0
  5. texttools/__init__.py +4 -9
  6. texttools/batch/__init__.py +3 -0
  7. texttools/{utils/batch_manager → batch}/batch_manager.py +226 -240
  8. texttools/batch/batch_runner.py +254 -0
  9. texttools/prompts/README.md +35 -0
  10. texttools/prompts/categorizer.yaml +28 -0
  11. texttools/prompts/extract_entities.yaml +20 -0
  12. texttools/prompts/extract_keywords.yaml +18 -0
  13. texttools/prompts/is_question.yaml +14 -0
  14. texttools/prompts/merge_questions.yaml +46 -0
  15. texttools/prompts/rewrite.yaml +111 -0
  16. texttools/prompts/run_custom.yaml +7 -0
  17. texttools/prompts/subject_to_question.yaml +22 -0
  18. texttools/prompts/summarize.yaml +14 -0
  19. texttools/prompts/text_to_question.yaml +20 -0
  20. texttools/prompts/translate.yaml +15 -0
  21. texttools/tools/__init__.py +4 -3
  22. texttools/tools/async_the_tool.py +435 -0
  23. texttools/tools/internals/async_operator.py +242 -0
  24. texttools/tools/internals/base_operator.py +100 -0
  25. texttools/tools/internals/formatters.py +24 -0
  26. texttools/tools/internals/operator.py +242 -0
  27. texttools/tools/internals/output_models.py +62 -0
  28. texttools/tools/internals/prompt_loader.py +60 -0
  29. texttools/tools/the_tool.py +433 -291
  30. hamtaa_texttools-1.0.1.dist-info/METADATA +0 -129
  31. hamtaa_texttools-1.0.1.dist-info/RECORD +0 -18
  32. texttools/formatters/base_formatter.py +0 -33
  33. texttools/formatters/user_merge_formatter/user_merge_formatter.py +0 -47
  34. texttools/prompts/__init__.py +0 -0
  35. texttools/tools/operator.py +0 -236
  36. texttools/tools/output_models.py +0 -54
  37. texttools/tools/prompt_loader.py +0 -84
  38. texttools/utils/__init__.py +0 -4
  39. texttools/utils/batch_manager/__init__.py +0 -4
  40. texttools/utils/batch_manager/batch_runner.py +0 -212
  41. {hamtaa_texttools-1.0.1.dist-info → hamtaa_texttools-1.1.7.dist-info}/WHEEL +0 -0
--- texttools/tools/the_tool.py (1.0.1)
+++ texttools/tools/the_tool.py (1.1.7)
@@ -1,291 +1,433 @@
- from typing import Literal, Any
-
- from openai import OpenAI
-
- from texttools.tools.operator import Operator
- import texttools.tools.output_models as OutputModels
-
-
- class TheTool:
-     """
-     High-level interface exposing specialized text tools for.
-
-     Each method configures the operator with a specific YAML prompt,
-     output schema, and flags, then delegates execution to `operator.run()`.
-
-     Supported capabilities:
-     - categorize: assign a text to one of several Islamic categories.
-     - extract_keywords: produce a keyword list from text.
-     - extract_entities: simple NER (name/type pairs).
-     - detect_question: binary check whether input is a question.
-     - generate_question_from_text: produce a new question from a text.
-     - merge_questions: combine multiple questions (default/reason modes).
-     - rewrite_question: rephrase questions (same meaning/different wording, or vice versa).
-     - generate_questions_from_subject: generate multiple questions given a subject.
-     - summarize: produce a concise summary of a subject.
-     - translate: translate text between languages.
-
-     Usage pattern:
-         client = OpenAI(...)
-         tool = TheTool(client, model="gemma-3")
-         result = tool.categorize("متن ورودی ...", with_analysis=True)
-     """
-
-     def __init__(
-         self,
-         client: OpenAI,
-         *,
-         model: str,
-         temperature: float = 0.0,
-         **client_kwargs: Any,
-     ):
-         self.operator = Operator(
-             client=client,
-             model=model,
-             temperature=temperature,
-             **client_kwargs,
-         )
-
-     def categorize(self, text: str, with_analysis: bool = False) -> dict[str, str]:
-         """
-         Categorize a text into a single Islamic studies domain category.
-
-         Args:
-             text: Input string to categorize.
-             with_analysis: If True, first runs an LLM "analysis" step and
-                 conditions the main prompt on that analysis.
-
-         Returns:
-             {"result": <category string>}
-             Example: {"result": "باورهای دینی"}
-         """
-         self.operator.PROMPT_FILE = "categorizer.yaml"
-         self.operator.OUTPUT_MODEL = OutputModels.CategorizerOutput
-         self.operator.WITH_ANALYSIS = with_analysis
-         self.operator.USE_MODES = False
-
-         results = self.operator.run(text)
-         return results
-
-     def extract_keywords(
-         self, text: str, with_analysis: bool = False
-     ) -> dict[str, list[str]]:
-         """
-         Extract salient keywords from text.
-
-         Args:
-             text: Input string to analyze.
-             with_analysis: Whether to run an extra LLM reasoning step.
-
-         Returns:
-             {"result": [<keyword1>, <keyword2>, ...]}
-         """
-         self.operator.PROMPT_FILE = "keyword_extractor.yaml"
-         self.operator.OUTPUT_MODEL = OutputModels.ListStrOutput
-         self.operator.WITH_ANALYSIS = with_analysis
-         self.operator.USE_MODES = False
-
-         results = self.operator.run(text)
-         return results
-
-     def extract_entities(
-         self, text: str, with_analysis: bool = False
-     ) -> dict[str, list[dict[str, str]]]:
-         """
-         Perform Named Entity Recognition (NER) over the input text.
-
-         Args:
-             text: Input string.
-             with_analysis: Whether to run an extra LLM reasoning step.
-
-         Returns:
-             {"result": [{"text": <entity>, "type": <entity_type>}, ...]}
-         """
-         self.operator.PROMPT_FILE = "ner_extractor.yaml"
-         self.operator.OUTPUT_MODEL = OutputModels.ListDictStrStrOutput
-         self.operator.WITH_ANALYSIS = with_analysis
-         self.operator.USE_MODES = False
-
-         results = self.operator.run(text)
-         return results
-
-     def detect_question(
-         self, question: str, with_analysis: bool = False
-     ) -> dict[str, str]:
-         """
-         Detect if the input is phrased as a question.
-
-         Args:
-             question: Input string to evaluate.
-             with_analysis: Whether to include an analysis step.
-
-         Returns:
-             {"result": "true"} or {"result": "false"}
-         """
-         self.operator.PROMPT_FILE = "question_detector.yaml"
-         self.operator.OUTPUT_MODEL = OutputModels.StrOutput
-         self.operator.WITH_ANALYSIS = with_analysis
-         self.operator.USE_MODES = False
-
-         results = self.operator.run(question)
-         return results
-
-     def generate_question_from_text(
-         self, text: str, with_analysis: bool = False
-     ) -> dict[str, str]:
-         """
-         Generate a single question from the given text.
-
-         Args:
-             text: Source text to derive a question from.
-             with_analysis: Whether to use analysis before generation.
-
-         Returns:
-             {"result": <generated_question>}
-         """
-         self.operator.PROMPT_FILE = "question_generator.yaml"
-         self.operator.OUTPUT_MODEL = OutputModels.StrOutput
-         self.operator.WITH_ANALYSIS = with_analysis
-         self.operator.USE_MODES = False
-
-         results = self.operator.run(text)
-         return results
-
-     def merge_questions(
-         self,
-         questions: list[str],
-         mode: Literal["default_mode", "reason_mode"] = "default_mode",
-         with_analysis: bool = False,
-     ) -> dict[str, str]:
-         """
-         Merge multiple questions into a single unified question.
-
-         Args:
-             questions: List of question strings.
-             mode: Merge strategy:
-                 - "default_mode": simple merging.
-                 - "reason_mode": merging with reasoning explanation.
-             with_analysis: Whether to use an analysis step.
-
-         Returns:
-             {"result": <merged_question>}
-         """
-         question_str = ", ".join(questions)
-
-         self.operator.PROMPT_FILE = "question_merger.yaml"
-         self.operator.OUTPUT_MODEL = OutputModels.StrOutput
-         self.operator.WITH_ANALYSIS = with_analysis
-         self.operator.USE_MODES = True
-         self.operator.MODE = mode
-
-         results = self.operator.run(question_str)
-         return results
-
-     def rewrite_question(
-         self,
-         question: str,
-         mode: Literal[
-             "same_meaning_different_wording_mode",
-             "different_meaning_similar_wording_mode",
-         ] = "same_meaning_different_wording_mode",
-         with_analysis: bool = False,
-     ) -> dict[str, str]:
-         """
-         Rewrite a question with different wording or meaning.
-
-         Args:
-             question: Input question to rewrite.
-             mode: Rewrite strategy:
-                 - "same_meaning_different_wording_mode": keep meaning, change words.
-                 - "different_meaning_similar_wording_mode": alter meaning, preserve wording style.
-             with_analysis: Whether to include an analysis step.
-
-         Returns:
-             {"result": <rewritten_question>}
-         """
-         self.operator.PROMPT_FILE = "question_rewriter.yaml"
-         self.operator.OUTPUT_MODEL = OutputModels.StrOutput
-         self.operator.WITH_ANALYSIS = with_analysis
-         self.operator.USE_MODES = True
-         self.operator.MODE = mode
-
-         results = self.operator.run(question)
-         return results
-
-     def generate_questions_from_subject(
-         self,
-         subject: str,
-         number_of_questions: int,
-         language: str = "English",
-         with_analysis: bool = False,
-     ) -> dict[str, list[str]]:
-         """
-         Generate a list of questions about a subject.
-
-         Args:
-             subject: Topic of interest.
-             number_of_questions: Number of questions to produce.
-             language: Target language for generated questions.
-             with_analysis: Whether to include an analysis step.
-
-         Returns:
-             {"result": [<question1>, <question2>, ...]}
-         """
-         self.operator.PROMPT_FILE = "subject_question_generator.yaml"
-         self.operator.OUTPUT_MODEL = OutputModels.ReasonListStrOutput
-         self.operator.WITH_ANALYSIS = with_analysis
-         self.operator.USE_MODES = False
-
-         results = self.operator.run(
-             subject,
-             number_of_questions=number_of_questions,
-             language=language,
-         )
-         return results
-
-     def summarize(self, subject: str, with_analysis: bool = False) -> dict[str, str]:
-         """
-         Summarize the given subject text.
-
-         Args:
-             subject: Input text to summarize.
-             with_analysis: Whether to include an analysis step.
-
-         Returns:
-             {"result": <summary>}
-         """
-         self.operator.PROMPT_FILE = "summarizer.yaml"
-         self.operator.OUTPUT_MODEL = OutputModels.StrOutput
-         self.operator.WITH_ANALYSIS = with_analysis
-         self.operator.USE_MODES = False
-
-         results = self.operator.run(subject)
-         return results
-
-     def translate(
-         self,
-         text: str,
-         target_language: str,
-         with_analysis: bool = False,
-     ) -> dict[str, str]:
-         """
-         Translate text between languages.
-
-         Args:
-             text: Input string to translate.
-             target_language: Language code or name to translate into.
-             with_analysis: Whether to include an analysis step.
-
-         Returns:
-             {"result": <translated_text>}
-         """
-         self.operator.PROMPT_FILE = "translator.yaml"
-         self.operator.OUTPUT_MODEL = OutputModels.StrOutput
-         self.operator.WITH_ANALYSIS = with_analysis
-         self.operator.USE_MODES = False
-
-         results = self.operator.run(
-             text,
-             target_language=target_language,
-         )
-         return results
+ from typing import Literal, Any, Callable
+
+ from openai import OpenAI
+
+ from texttools.tools.internals.operator import Operator
+ import texttools.tools.internals.output_models as OutputModels
+
+
+ class TheTool:
+     """
+     Each method configures the operator with a specific YAML prompt,
+     output schema, and flags, then delegates execution to `operator.run()`.
+
+     Usage:
+         client = OpenAI(...)
+         tool = TheTool(client, model="model-name")
+         result = tool.categorize("text ...", with_analysis=True)
+     """
+
+     def __init__(
+         self,
+         client: OpenAI,
+         model: str,
+     ):
+         self.operator = Operator(client=client, model=model)
+
+     def categorize(
+         self,
+         text: str,
+         with_analysis: bool = False,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
+         logprobs: bool = False,
+         top_logprobs: int | None = None,
+         validator: Callable[[Any], bool] | None = None,
+     ) -> OutputModels.ToolOutput:
+         """
+         Categorize a text into a single Islamic studies domain category.
+
+         Returns:
+             ToolOutput: Object containing:
+                 - result (str): The assigned Islamic studies category
+                 - logprobs (list | None): Probability data if logprobs enabled
+                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+         """
+         return self.operator.run(
+             # User parameters
+             text=text,
+             with_analysis=with_analysis,
+             user_prompt=user_prompt,
+             temperature=temperature,
+             logprobs=logprobs,
+             top_logprobs=top_logprobs,
+             validator=validator,
+             # Internal parameters
+             prompt_file="categorizer.yaml",
+             output_model=OutputModels.CategorizerOutput,
+             resp_format="parse",
+             mode=None,
+             output_lang=None,
+         )
+
+     def extract_keywords(
+         self,
+         text: str,
+         with_analysis: bool = False,
+         output_lang: str | None = None,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
+         logprobs: bool = False,
+         top_logprobs: int | None = None,
+         validator: Callable[[Any], bool] | None = None,
+     ) -> OutputModels.ToolOutput:
+         """
+         Extract salient keywords from text.
+
+         Returns:
+             ToolOutput: Object containing:
+                 - result (list[str]): List of extracted keywords
+                 - logprobs (list | None): Probability data if logprobs enabled
+                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+         """
+         return self.operator.run(
+             # User parameters
+             text=text,
+             with_analysis=with_analysis,
+             output_lang=output_lang,
+             user_prompt=user_prompt,
+             temperature=temperature,
+             logprobs=logprobs,
+             top_logprobs=top_logprobs,
+             validator=validator,
+             # Internal parameters
+             prompt_file="extract_keywords.yaml",
+             output_model=OutputModels.ListStrOutput,
+             resp_format="parse",
+             mode=None,
+         )
+
+     def extract_entities(
+         self,
+         text: str,
+         with_analysis: bool = False,
+         output_lang: str | None = None,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
+         logprobs: bool = False,
+         top_logprobs: int | None = None,
+         validator: Callable[[Any], bool] | None = None,
+     ) -> OutputModels.ToolOutput:
+         """
+         Perform Named Entity Recognition (NER) over the input text.
+
+         Returns:
+             ToolOutput: Object containing:
+                 - result (list[dict]): List of entities with 'text' and 'type' keys
+                 - logprobs (list | None): Probability data if logprobs enabled
+                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+         """
+         return self.operator.run(
+             # User parameters
+             text=text,
+             with_analysis=with_analysis,
+             output_lang=output_lang,
+             user_prompt=user_prompt,
+             temperature=temperature,
+             logprobs=logprobs,
+             top_logprobs=top_logprobs,
+             validator=validator,
+             # Internal parameters
+             prompt_file="extract_entities.yaml",
+             output_model=OutputModels.ListDictStrStrOutput,
+             resp_format="parse",
+             mode=None,
+         )
+
+     def is_question(
+         self,
+         text: str,
+         with_analysis: bool = False,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
+         logprobs: bool = False,
+         top_logprobs: int | None = None,
+         validator: Callable[[Any], bool] | None = None,
+     ) -> OutputModels.ToolOutput:
+         """
+         Detect if the input is phrased as a question.
+
+         Returns:
+             ToolOutput: Object containing:
+                 - result (bool): True if text is a question, False otherwise
+                 - logprobs (list | None): Probability data if logprobs enabled
+                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+         """
+         return self.operator.run(
+             # User parameters
+             text=text,
+             with_analysis=with_analysis,
+             user_prompt=user_prompt,
+             temperature=temperature,
+             logprobs=logprobs,
+             top_logprobs=top_logprobs,
+             validator=validator,
+             # Internal parameters
+             prompt_file="is_question.yaml",
+             output_model=OutputModels.BoolOutput,
+             resp_format="parse",
+             mode=None,
+             output_lang=None,
+         )
+
+     def text_to_question(
+         self,
+         text: str,
+         with_analysis: bool = False,
+         output_lang: str | None = None,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
+         logprobs: bool = False,
+         top_logprobs: int | None = None,
+         validator: Callable[[Any], bool] | None = None,
+     ) -> OutputModels.ToolOutput:
+         """
+         Generate a single question from the given text.
+
+         Returns:
+             ToolOutput: Object containing:
+                 - result (str): The generated question
+                 - logprobs (list | None): Probability data if logprobs enabled
+                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+         """
+         return self.operator.run(
+             # User parameters
+             text=text,
+             with_analysis=with_analysis,
+             output_lang=output_lang,
+             user_prompt=user_prompt,
+             temperature=temperature,
+             logprobs=logprobs,
+             top_logprobs=top_logprobs,
+             validator=validator,
+             # Internal parameters
+             prompt_file="text_to_question.yaml",
+             output_model=OutputModels.StrOutput,
+             resp_format="parse",
+             mode=None,
+         )
+
+     def merge_questions(
+         self,
+         text: list[str],
+         with_analysis: bool = False,
+         output_lang: str | None = None,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
+         logprobs: bool = False,
+         top_logprobs: int | None = None,
+         mode: Literal["default", "reason"] = "default",
+         validator: Callable[[Any], bool] | None = None,
+     ) -> OutputModels.ToolOutput:
+         """
+         Merge multiple questions into a single unified question.
+
+         Returns:
+             ToolOutput: Object containing:
+                 - result (str): The merged question
+                 - logprobs (list | None): Probability data if logprobs enabled
+                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+         """
+         text = ", ".join(text)
+         return self.operator.run(
+             # User parameters
+             text=text,
+             with_analysis=with_analysis,
+             output_lang=output_lang,
+             user_prompt=user_prompt,
+             temperature=temperature,
+             logprobs=logprobs,
+             top_logprobs=top_logprobs,
+             validator=validator,
+             # Internal parameters
+             prompt_file="merge_questions.yaml",
+             output_model=OutputModels.StrOutput,
+             resp_format="parse",
+             mode=mode,
+         )
+
+     def rewrite(
+         self,
+         text: str,
+         with_analysis: bool = False,
+         output_lang: str | None = None,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
+         logprobs: bool = False,
+         top_logprobs: int | None = None,
+         mode: Literal["positive", "negative", "hard_negative"] = "positive",
+         validator: Callable[[Any], bool] | None = None,
+     ) -> OutputModels.ToolOutput:
+         """
+         Rewrite a text with different modes.
+
+         Returns:
+             ToolOutput: Object containing:
+                 - result (str): The rewritten text
+                 - logprobs (list | None): Probability data if logprobs enabled
+                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+         """
+         return self.operator.run(
+             # User parameters
+             text=text,
+             with_analysis=with_analysis,
+             output_lang=output_lang,
+             user_prompt=user_prompt,
+             temperature=temperature,
+             logprobs=logprobs,
+             top_logprobs=top_logprobs,
+             validator=validator,
+             # Internal parameters
+             prompt_file="rewrite.yaml",
+             output_model=OutputModels.StrOutput,
+             resp_format="parse",
+             mode=mode,
+         )
+
+     def subject_to_question(
+         self,
+         text: str,
+         number_of_questions: int,
+         with_analysis: bool = False,
+         output_lang: str | None = None,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
+         logprobs: bool = False,
+         top_logprobs: int | None = None,
+         validator: Callable[[Any], bool] | None = None,
+     ) -> OutputModels.ToolOutput:
+         """
+         Generate a list of questions about a subject.
+
+         Returns:
+             ToolOutput: Object containing:
+                 - result (list[str]): List of generated questions
+                 - logprobs (list | None): Probability data if logprobs enabled
+                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+         """
+         return self.operator.run(
+             # User parameters
+             text=text,
+             number_of_questions=number_of_questions,
+             with_analysis=with_analysis,
+             output_lang=output_lang,
+             user_prompt=user_prompt,
+             temperature=temperature,
+             logprobs=logprobs,
+             top_logprobs=top_logprobs,
+             validator=validator,
+             # Internal parameters
+             prompt_file="subject_to_question.yaml",
+             output_model=OutputModels.ReasonListStrOutput,
+             resp_format="parse",
+             mode=None,
+         )
+
+     def summarize(
+         self,
+         text: str,
+         with_analysis: bool = False,
+         output_lang: str | None = None,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
+         logprobs: bool = False,
+         top_logprobs: int | None = None,
+         validator: Callable[[Any], bool] | None = None,
+     ) -> OutputModels.ToolOutput:
+         """
+         Summarize the given subject text.
+
+         Returns:
+             ToolOutput: Object containing:
+                 - result (str): The summary text
+                 - logprobs (list | None): Probability data if logprobs enabled
+                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+         """
+         return self.operator.run(
+             # User parameters
+             text=text,
+             with_analysis=with_analysis,
+             output_lang=output_lang,
+             user_prompt=user_prompt,
+             temperature=temperature,
+             logprobs=logprobs,
+             top_logprobs=top_logprobs,
+             validator=validator,
+             # Internal parameters
+             prompt_file="summarize.yaml",
+             output_model=OutputModels.StrOutput,
+             resp_format="parse",
+             mode=None,
+         )
+
+     def translate(
+         self,
+         text: str,
+         target_language: str,
+         with_analysis: bool = False,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
+         logprobs: bool = False,
+         top_logprobs: int | None = None,
+         validator: Callable[[Any], bool] | None = None,
+     ) -> OutputModels.ToolOutput:
+         """
+         Translate text between languages.
+
+         Returns:
+             ToolOutput: Object containing:
+                 - result (str): The translated text
+                 - logprobs (list | None): Probability data if logprobs enabled
+                 - analysis (str | None): Detailed reasoning if with_analysis enabled
+         """
+         return self.operator.run(
+             # User parameters
+             text=text,
+             target_language=target_language,
+             with_analysis=with_analysis,
+             user_prompt=user_prompt,
+             temperature=temperature,
+             logprobs=logprobs,
+             top_logprobs=top_logprobs,
+             validator=validator,
+             # Internal parameters
+             prompt_file="translate.yaml",
+             output_model=OutputModels.StrOutput,
+             resp_format="parse",
+             mode=None,
+             output_lang=None,
+         )
+
+     def run_custom(
+         self,
+         prompt: str,
+         output_model: Any,
+         output_lang: str | None = None,
+         temperature: float | None = None,
+         logprobs: bool | None = None,
+         top_logprobs: int | None = None,
+     ) -> OutputModels.ToolOutput:
+         """
+         Custom tool that can do almost anything!
+
+         Returns:
+             ToolOutput: Object with fields:
+                 - result (str): The output result
+         """
+         return self.operator.run(
+             # User paramaeters
+             text=prompt,
+             output_model=output_model,
+             output_model_str=output_model.model_json_schema(),
+             output_lang=output_lang,
+             temperature=temperature,
+             logprobs=logprobs,
+             top_logprobs=top_logprobs,
+             # Internal parameters
+             prompt_file="run_custom.yaml",
+             resp_format="parse",
+             user_prompt=None,
+             with_analysis=False,
+             mode=None,
+             validator=None,
+         )
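
For orientation, below is a minimal sketch of how the reworked 1.1.7 interface is called, based only on the signatures and docstrings in the diff above. The base_url, api_key, and model name are placeholders; the import path follows the file list (texttools/tools/the_tool.py); and attribute access on the returned ToolOutput assumes it exposes the result / analysis / logprobs fields exactly as the docstrings describe (ToolOutput is defined in texttools/tools/internals/output_models.py, which is not shown in this diff).

from openai import OpenAI

from texttools.tools.the_tool import TheTool  # path per the file list above

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")  # placeholder endpoint
tool = TheTool(client, model="model-name")

# 1.0.1 returned plain dicts such as {"result": "..."}; 1.1.7 returns a ToolOutput object.
out = tool.categorize("Some input text ...", with_analysis=True)
print(out.result)    # assigned category
print(out.analysis)  # reasoning text, populated because with_analysis=True

check = tool.is_question("Is this a question?", logprobs=True, top_logprobs=3)
print(check.result)    # True or False
print(check.logprobs)  # token probability data, populated because logprobs=True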