hamtaa-texttools 1.0.6__py3-none-any.whl → 1.0.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
hamtaa_texttools-1.0.6.dist-info/METADATA → hamtaa_texttools-1.0.8.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: hamtaa-texttools
- Version: 1.0.6
+ Version: 1.0.8
  Summary: TextTools is a high-level NLP toolkit built on top of modern LLMs.
  Author-email: Tohidi <the.mohammad.tohidi@gmail.com>, Montazer <montazerh82@gmail.com>, Givechi <mohamad.m.givechi@gmail.com>, MoosaviNejad <erfanmoosavi84@gmail.com>
  License: MIT License
@@ -42,8 +42,6 @@ It provides both **sync (`TheTool`)** and **async (`AsyncTheTool`)** APIs for ma

  It provides ready-to-use utilities for **translation, question detection, keyword extraction, categorization, NER extraction, and more** — designed to help you integrate AI-powered text processing into your applications with minimal effort.

- **Thread Safety:** All methods in AsyncTheTool are thread-safe, allowing concurrent usage across multiple threads without conflicts.
-
  ---

  ## ✨ Features
@@ -78,7 +76,11 @@ Note: This doubles token usage per call because it triggers an additional LLM re

  - **`user_prompt="..."`** → Allows you to inject a custom instruction or prompt into the model alongside the main template. This gives you fine-grained control over how the model interprets or modifies the input text.

- All these flags can be used individually or together to tailor the behavior of any tool in **TextTools**.
+ - **`temperature=0.0`** → Controls how creative the model's response is. Accepts a float from `0.0` to `1.0`.
+
+ All these parameters can be used individually or together to tailor the behavior of any tool in **TextTools**.
+
+ **Note:** Some tools may not support all of the parameters above.

  ---

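As an editorial aside, here is a minimal sketch of combining the parameters above in a single call. It follows the README's own `is_question` example; the `user_prompt` string and the client/model values are placeholder assumptions, not from the package docs:

```python
from openai import OpenAI

from texttools import TheTool

# Placeholder client/model, mirroring the Quick Start example below
client = OpenAI(base_url="your_url", api_key="your_api_key")
the_tool = TheTool(client=client, model="gpt-4o-mini")

# Several parameters combined in one call
detection = the_tool.is_question(
    "Is this project open source?",
    with_analysis=True,  # adds an "analysis" key; doubles token usage
    user_prompt="Treat rhetorical questions as questions.",  # assumed instruction
    temperature=0.0,     # most deterministic setting
    logprobs=True,
    top_logprobs=2,      # adds a "logprobs" key
)
print(detection["result"])    # e.g. True
print(detection["analysis"])  # present because with_analysis=True
print(detection["logprobs"])  # present because logprobs=True
```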
@@ -104,7 +106,6 @@ pip install -U hamtaa-texttools

  ```python
  from openai import OpenAI
- from pydantic import BaseModel
  from texttools import TheTool

  # Create your OpenAI client
@@ -114,29 +115,19 @@ client = OpenAI(base_url="your_url", api_key="your_api_key")
  model = "gpt-4o-mini"

  # Create an instance of TheTool
- # Note: You can give parameters to TheTool so that you don't need to give them to each tool
- the_tool = TheTool(client=client, model=model, with_analysis=True, output_lang="English")
+ the_tool = TheTool(client=client, model=model)

  # Example: Question Detection
  detection = the_tool.is_question("Is this project open source?", logprobs=True, top_logprobs=2)
  print(detection["result"])
  print(detection["logprobs"])
- # Output: True
+ # Output: True, followed by the logprobs

  # Example: Translation
- # Note: You can overwrite with_analysis if defined at TheTool
- print(the_tool.translate("سلام، حالت چطوره؟", target_language="English", with_analysis=False)["result"])
- # Output: "Hi! How are you?"
-
- # Example: Custom Tool
- # Note: Output model should only contain result key
- # Everything else will be ignored
- class Custom(BaseModel):
-     result: list[list[dict[str, int]]]
-
- custom_prompt = "Something"
- custom_result = the_tool.run_custom(custom_prompt, Custom)
- print(custom_result)
+ translation = the_tool.translate("سلام، حالت چطوره؟", target_language="English", with_analysis=True)
+ print(translation["result"])
+ print(translation["analysis"])
+ # Output: "Hi! How are you?", followed by the analysis
  ```

  ---
@@ -149,7 +140,7 @@ from openai import AsyncOpenAI
  from texttools import AsyncTheTool

  async def main():
-     # Create your async OpenAI client
+     # Create your AsyncOpenAI client
      async_client = AsyncOpenAI(base_url="your_url", api_key="your_api_key")

      # Specify the model
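The diff truncates the async Quick Start at this point. For context, a minimal runnable sketch of how the example plausibly continues, based on the `AsyncTheTool` signatures added in this release (the model name and the awaited call are assumptions):

```python
import asyncio

from openai import AsyncOpenAI
from texttools import AsyncTheTool

async def main():
    # Create your AsyncOpenAI client
    async_client = AsyncOpenAI(base_url="your_url", api_key="your_api_key")

    # Specify the model (placeholder name)
    model = "gpt-4o-mini"

    # Create an instance of AsyncTheTool
    the_tool = AsyncTheTool(client=async_client, model=model)

    # The async API mirrors the sync tools; calls are awaited
    detection = await the_tool.is_question("Is this project open source?")
    print(detection["result"])  # e.g. True

asyncio.run(main())
```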
hamtaa_texttools-1.0.8.dist-info/RECORD (new file)
@@ -0,0 +1,30 @@
+ hamtaa_texttools-1.0.8.dist-info/licenses/LICENSE,sha256=Hb2YOBKy2MJQLnyLrX37B4ZVuac8eaIcE71SvVIMOLg,1082
+ texttools/__init__.py,sha256=v3tQCH_Cjj47fCpuhK6sKSVAqEjNkc-cZbY4OJa4IZw,202
+ texttools/batch/__init__.py,sha256=q50JsQsmQGp_8RW0KNasYeYWVV0R4FUNZ-ujXwEJemY,143
+ texttools/batch/batch_manager.py,sha256=leVIFkR-3HpDkQi_MK3TgFNnHYsCN-wbS4mTWoPmO3c,8828
+ texttools/batch/batch_runner.py,sha256=cgiCYLIBQQC0dBWM8_lVP9c5QLJoAmS2ijMtp0p3U2o,10313
+ texttools/prompts/README.md,sha256=rclMaCV1N8gT1KcpZu0-ka0dKGNg2f1CEcRMdQkgQOc,1379
+ texttools/prompts/categorizer.yaml,sha256=GMqIIzQFhgnlpkgU1qi3FAD3mD4A2jiWD5TilQ2XnnE,1204
+ texttools/prompts/extract_entities.yaml,sha256=KiKjeDpHaeh3JVtZ6q1pa3k4DYucUIU9WnEcRTCA-SE,651
+ texttools/prompts/extract_keywords.yaml,sha256=0O7ypL_OsEOxtvlQ2CZjnsv9637DJwAKprZsf9Vo2_s,769
+ texttools/prompts/is_question.yaml,sha256=d0-vKRbXWkxvO64ikvxRjEmpAXGpCYIPGhgexvPPjws,471
+ texttools/prompts/merge_questions.yaml,sha256=0J85GvTirZB4ELwH3sk8ub_WcqqpYf6PrMKr3djlZeo,1792
+ texttools/prompts/rewrite.yaml,sha256=LO7He_IA3MZKz8a-LxH9DHJpOjpYwaYN1pbjp1Y0tFo,5392
+ texttools/prompts/run_custom.yaml,sha256=38OkCoVITbuuS9c08UZSP1jZW4WjSmRIi8fR0RAiPu4,108
+ texttools/prompts/subject_to_question.yaml,sha256=C7x7rNNm6U_ZG9HOn6zuzYOtvJUZ2skuWbL1-aYdd3E,1147
+ texttools/prompts/summarize.yaml,sha256=o6rxGPfWtZd61Duvm8NVvCJqfq73b-wAuMSKR6UYUqY,459
+ texttools/prompts/text_to_question.yaml,sha256=UheKYpDn6iyKI8NxunHZtFpNyfCLZZe5cvkuXpurUJY,783
+ texttools/prompts/translate.yaml,sha256=mGT2uBCei6uucWqVbs4silk-UV060v3G0jnt0P6sr50,634
+ texttools/tools/__init__.py,sha256=hG1I28Q7BJ1Dbs95x6QMKXdsAlC5Eh_tqC-EbAibwiU,114
+ texttools/tools/async_the_tool.py,sha256=9VY6ym7SvQqlokt0mwAwnytmu1CUIehDmnAvx74Z78o,12480
+ texttools/tools/the_tool.py,sha256=JtQolr6i_6xogtuhX6IhyudFyAsitQy5NfSxUJAS-iA,12246
+ texttools/tools/internals/async_operator.py,sha256=GPTHsjldQlmNYXkGCyypc9ENIxjeHUIHWWJ_ltUyNfs,6006
+ texttools/tools/internals/base_operator.py,sha256=5tZy6QEGWJI9fnVIYwOecqv8teOFBIpWFOxQav11VRM,2941
+ texttools/tools/internals/formatters.py,sha256=tACNLP6PeoqaRpNudVxBaHA25zyWqWYPZQuYysIu88g,941
+ texttools/tools/internals/operator.py,sha256=gl_vzcLueYi6cbxjIHhOCBtyhM6UXwJJ9Mstj8bA-Mg,5896
+ texttools/tools/internals/output_models.py,sha256=Rf2x-UuGlmQHrvYIqnD11YuzMH_mPuir62HoMJQa2uk,1528
+ texttools/tools/internals/prompt_loader.py,sha256=rbitJD3e8vAdcooP1Yx6KnSI83g28ho-FegfZ1cJ4j4,1979
+ hamtaa_texttools-1.0.8.dist-info/METADATA,sha256=9yLqF7h34BREgmpbVxrEH2sXfglyJSNiddewvoeSy1c,7148
+ hamtaa_texttools-1.0.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ hamtaa_texttools-1.0.8.dist-info/top_level.txt,sha256=5Mh0jIxxZ5rOXHGJ6Mp-JPKviywwN0MYuH0xk5bEWqE,10
+ hamtaa_texttools-1.0.8.dist-info/RECORD,,
texttools/tools/async_the_tool.py
@@ -1,277 +1,383 @@
- from typing import Literal
+ from typing import Literal, Any

  from openai import AsyncOpenAI

- import texttools.tools.internals.output_models as OutputModels
  from texttools.tools.internals.async_operator import AsyncOperator
+ import texttools.tools.internals.output_models as OutputModels


  class AsyncTheTool:
      """
      Async counterpart to TheTool.

+     Each method configures the async operator with a specific YAML prompt,
+     output schema, and flags, then delegates execution to `operator.run()`.
+
      Usage:
          async_client = AsyncOpenAI(...)
-         tool = TheToolAsync(async_client, model="gemma-3")
-         result = await tool.categorize("متن ...", with_analysis=True)
+         tool = AsyncTheTool(async_client, model="model-name")
+         result = await tool.categorize("text ...", with_analysis=True)
      """

      def __init__(
          self,
          client: AsyncOpenAI,
-         *,
          model: str,
-         temperature: float = 0.0,
      ):
-         self.operator = AsyncOperator(
-             client=client,
-             model=model,
-             temperature=temperature,
-         )
+         self.operator = AsyncOperator(client=client, model=model)

      async def categorize(
          self,
          text: str,
          with_analysis: bool = False,
-         user_prompt: str = "",
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
          logprobs: bool = False,
-         top_logprobs: int = 8,
-         max_tokens: int | None = None,
+         top_logprobs: int | None = None,
      ) -> dict[str, str]:
-         results = await self.operator.run(
-             text,
-             prompt_file="categorizer.yaml",
-             output_model=OutputModels.CategorizerOutput,
+         """
+         Categorize a text into a single Islamic studies domain category.
+
+         Returns:
+             {"result": <category string>} + ("logprobs" and "analysis" if enabled)
+         """
+         return await self.operator.run(
+             # User parameters
+             text=text,
              with_analysis=with_analysis,
-             resp_format="parse",
              user_prompt=user_prompt,
+             temperature=temperature,
              logprobs=logprobs,
              top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
+             # Internal parameters
+             prompt_file="categorizer.yaml",
+             output_model=OutputModels.CategorizerOutput,
+             resp_format="parse",
+             mode=None,
+             output_lang=None,
          )
-         return results

      async def extract_keywords(
          self,
          text: str,
-         output_lang: str | None = None,
          with_analysis: bool = False,
-         user_prompt: str = "",
+         output_lang: str | None = None,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
          logprobs: bool = False,
-         top_logprobs: int = 3,
-         max_tokens: int | None = None,
+         top_logprobs: int | None = None,
      ) -> dict[str, list[str]]:
-         results = await self.operator.run(
-             text,
-             prompt_file="keyword_extractor.yaml",
-             output_model=OutputModels.ListStrOutput,
+         """
+         Extract salient keywords from text.
+
+         Returns:
+             {"result": [<keyword1>, <keyword2>, ...]} + ("logprobs" and "analysis" if enabled)
+         """
+         return await self.operator.run(
+             # User parameters
+             text=text,
              with_analysis=with_analysis,
-             resp_format="parse",
-             user_prompt=user_prompt,
              output_lang=output_lang,
+             user_prompt=user_prompt,
+             temperature=temperature,
              logprobs=logprobs,
              top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
+             # Internal parameters
+             prompt_file="extract_keywords.yaml",
+             output_model=OutputModels.ListStrOutput,
+             resp_format="parse",
+             mode=None,
          )
-         return results

      async def extract_entities(
          self,
          text: str,
-         output_lang: str | None = None,
          with_analysis: bool = False,
-         user_prompt: str = "",
+         output_lang: str | None = None,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
          logprobs: bool = False,
-         top_logprobs: int = 3,
-         max_tokens: int | None = None,
+         top_logprobs: int | None = None,
      ) -> dict[str, list[dict[str, str]]]:
-         results = await self.operator.run(
-             text,
-             prompt_file="ner_extractor.yaml",
-             output_model=OutputModels.ListDictStrStrOutput,
+         """
+         Perform Named Entity Recognition (NER) over the input text.
+
+         Returns:
+             {"result": [{"text": <entity>, "type": <entity_type>}, ...]} + ("logprobs" and "analysis" if enabled)
+         """
+         return await self.operator.run(
+             # User parameters
+             text=text,
              with_analysis=with_analysis,
-             resp_format="parse",
-             user_prompt=user_prompt,
              output_lang=output_lang,
+             user_prompt=user_prompt,
+             temperature=temperature,
              logprobs=logprobs,
              top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
+             # Internal parameters
+             prompt_file="extract_entities.yaml",
+             output_model=OutputModels.ListDictStrStrOutput,
+             resp_format="parse",
+             mode=None,
          )
-         return results

      async def is_question(
          self,
-         question: str,
-         output_lang: str | None = None,
+         text: str,
          with_analysis: bool = False,
-         user_prompt: str = "",
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
          logprobs: bool = False,
-         top_logprobs: int = 2,
-         max_tokens: int | None = None,
+         top_logprobs: int | None = None,
      ) -> dict[str, bool]:
-         results = await self.operator.run(
-             question,
-             prompt_file="is_question.yaml",
-             output_model=OutputModels.BoolOutput,
+         """
+         Detect if the input is phrased as a question.
+
+         Returns:
+             {"result": True} or {"result": False} + ("logprobs" and "analysis" if enabled)
+         """
+         return await self.operator.run(
+             # User parameters
+             text=text,
              with_analysis=with_analysis,
-             resp_format="parse",
              user_prompt=user_prompt,
-             output_lang=output_lang,
+             temperature=temperature,
              logprobs=logprobs,
              top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
+             # Internal parameters
+             prompt_file="is_question.yaml",
+             output_model=OutputModels.BoolOutput,
+             resp_format="parse",
+             mode=None,
+             output_lang=None,
          )
-         return results

      async def text_to_question(
          self,
          text: str,
-         output_lang: str | None = None,
          with_analysis: bool = False,
-         user_prompt: str = "",
+         output_lang: str | None = None,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
          logprobs: bool = False,
-         top_logprobs: int = 3,
-         max_tokens: int | None = None,
+         top_logprobs: int | None = None,
      ) -> dict[str, str]:
-         results = await self.operator.run(
-             text,
-             prompt_file="text_to_question.yaml",
-             output_model=OutputModels.StrOutput,
+         """
+         Generate a single question from the given text.
+
+         Returns:
+             {"result": <generated_question>} + ("logprobs" and "analysis" if enabled)
+         """
+         return await self.operator.run(
+             # User parameters
+             text=text,
              with_analysis=with_analysis,
-             resp_format="parse",
-             user_prompt=user_prompt,
              output_lang=output_lang,
+             user_prompt=user_prompt,
+             temperature=temperature,
              logprobs=logprobs,
              top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
+             # Internal parameters
+             prompt_file="text_to_question.yaml",
+             output_model=OutputModels.StrOutput,
+             resp_format="parse",
+             mode=None,
          )
-         return results

      async def merge_questions(
          self,
-         questions: list[str],
-         output_lang: str | None = None,
-         mode: Literal["default", "reason"] = "default",
+         text: list[str],
          with_analysis: bool = False,
-         user_prompt: str = "",
+         output_lang: str | None = None,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
          logprobs: bool = False,
-         top_logprobs: int = 3,
-         max_tokens: int | None = None,
+         top_logprobs: int | None = None,
+         mode: Literal["default", "reason"] = "default",
      ) -> dict[str, str]:
-         question_str = ", ".join(questions)
-         results = await self.operator.run(
-             question_str,
-             prompt_file="question_merger.yaml",
-             output_model=OutputModels.StrOutput,
+         """
+         Merge multiple questions into a single unified question.
+
+         Returns:
+             {"result": <merged_question>} + ("logprobs" and "analysis" if enabled)
+         """
+         text = ", ".join(text)
+         return await self.operator.run(
+             # User parameters
+             text=text,
              with_analysis=with_analysis,
-             use_modes=True,
-             mode=mode,
-             resp_format="parse",
-             user_prompt=user_prompt,
              output_lang=output_lang,
+             user_prompt=user_prompt,
+             temperature=temperature,
              logprobs=logprobs,
              top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
+             # Internal parameters
+             prompt_file="merge_questions.yaml",
+             output_model=OutputModels.StrOutput,
+             resp_format="parse",
+             mode=mode,
          )
-         return results

      async def rewrite(
          self,
-         question: str,
-         output_lang: str | None = None,
-         mode: Literal["positive", "negative", "hard_negative"] = "positive",
+         text: str,
          with_analysis: bool = False,
-         user_prompt: str = "",
+         output_lang: str | None = None,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
          logprobs: bool = False,
-         top_logprobs: int = 3,
-         max_tokens: int | None = None,
+         top_logprobs: int | None = None,
+         mode: Literal["positive", "negative", "hard_negative"] = "positive",
      ) -> dict[str, str]:
-         results = await self.operator.run(
-             question,
-             prompt_file="rewriter.yaml",
-             output_model=OutputModels.StrOutput,
+         """
+         Rewrite a text with different modes.
+
+         Returns:
+             {"result": <rewritten_text>} + ("logprobs" and "analysis" if enabled)
+         """
+         return await self.operator.run(
+             # User parameters
+             text=text,
              with_analysis=with_analysis,
-             use_modes=True,
-             mode=mode,
-             resp_format="parse",
-             user_prompt=user_prompt,
              output_lang=output_lang,
+             user_prompt=user_prompt,
+             temperature=temperature,
              logprobs=logprobs,
              top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
+             # Internal parameters
+             prompt_file="rewrite.yaml",
+             output_model=OutputModels.StrOutput,
+             resp_format="parse",
+             mode=mode,
          )
-         return results

      async def subject_to_question(
          self,
-         subject: str,
+         text: str,
          number_of_questions: int,
-         output_lang: str | None = None,
          with_analysis: bool = False,
-         user_prompt: str = "",
+         output_lang: str | None = None,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
          logprobs: bool = False,
-         top_logprobs: int = 3,
-         max_tokens: int | None = None,
+         top_logprobs: int | None = None,
      ) -> dict[str, list[str]]:
-         results = await self.operator.run(
-             subject,
-             prompt_file="subject_to_question.yaml",
-             output_model=OutputModels.ReasonListStrOutput,
-             with_analysis=with_analysis,
-             resp_format="parse",
-             user_prompt=user_prompt,
+         """
+         Generate a list of questions about a subject.
+
+         Returns:
+             {"result": [<question1>, <question2>, ...]} + ("logprobs" and "analysis" if enabled)
+         """
+         return await self.operator.run(
+             # User parameters
+             text=text,
              number_of_questions=number_of_questions,
+             with_analysis=with_analysis,
              output_lang=output_lang,
+             user_prompt=user_prompt,
+             temperature=temperature,
              logprobs=logprobs,
              top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
+             # Internal parameters
+             prompt_file="subject_to_question.yaml",
+             output_model=OutputModels.ReasonListStrOutput,
+             resp_format="parse",
+             mode=None,
          )
-         return results

      async def summarize(
          self,
          text: str,
-         output_lang: str | None = None,
          with_analysis: bool = False,
-         user_prompt: str = "",
+         output_lang: str | None = None,
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
          logprobs: bool = False,
-         top_logprobs: int = 3,
-         max_tokens: int | None = None,
+         top_logprobs: int | None = None,
      ) -> dict[str, str]:
-         results = await self.operator.run(
-             text,
-             prompt_file="summarizer.yaml",
-             output_model=OutputModels.StrOutput,
+         """
+         Summarize the given subject text.
+
+         Returns:
+             {"result": <summary>} + ("logprobs" and "analysis" if enabled)
+         """
+         return await self.operator.run(
+             # User parameters
+             text=text,
              with_analysis=with_analysis,
-             resp_format="parse",
-             user_prompt=user_prompt,
              output_lang=output_lang,
+             user_prompt=user_prompt,
+             temperature=temperature,
              logprobs=logprobs,
              top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
+             # Internal parameters
+             prompt_file="summarize.yaml",
+             output_model=OutputModels.StrOutput,
+             resp_format="parse",
+             mode=None,
          )
-         return results

      async def translate(
          self,
          text: str,
          target_language: str,
          with_analysis: bool = False,
-         user_prompt: str = "",
+         user_prompt: str | None = None,
+         temperature: float | None = 0.0,
          logprobs: bool = False,
-         top_logprobs: int = 3,
-         max_tokens: int | None = None,
+         top_logprobs: int | None = None,
      ) -> dict[str, str]:
-         results = await self.operator.run(
-             text,
-             prompt_file="translator.yaml",
-             output_model=OutputModels.StrOutput,
+         """
+         Translate text between languages.
+
+         Returns:
+             {"result": <translated_text>} + ("logprobs" and "analysis" if enabled)
+         """
+         return await self.operator.run(
+             # User parameters
+             text=text,
+             target_language=target_language,
              with_analysis=with_analysis,
-             resp_format="parse",
              user_prompt=user_prompt,
-             target_language=target_language,
+             temperature=temperature,
+             logprobs=logprobs,
+             top_logprobs=top_logprobs,
+             # Internal parameters
+             prompt_file="translate.yaml",
+             output_model=OutputModels.StrOutput,
+             resp_format="parse",
+             mode=None,
+             output_lang=None,
+         )
+
+     async def run_custom(
+         self,
+         prompt: str,
+         output_model: Any,
+         output_lang: str | None = None,
+         temperature: float | None = None,
+         logprobs: bool | None = None,
+         top_logprobs: int | None = None,
+     ) -> dict[str, Any]:
+         """
+         Custom tool that can do almost anything!
+
+         Returns:
+             {"result": <Any>}
+         """
+         return await self.operator.run(
+             # User parameters
+             text=prompt,
+             output_model=output_model,
+             output_model_str=output_model.model_json_schema(),
+             output_lang=output_lang,
+             temperature=temperature,
              logprobs=logprobs,
              top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
+             # Internal parameters
+             prompt_file="run_custom.yaml",
+             resp_format="parse",
+             user_prompt=None,
+             with_analysis=False,
+             mode=None,
          )
-         return results
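As an editorial aside, the new `run_custom` method takes over the role of the sync `run_custom` example removed from the README above. A minimal usage sketch, assuming a Pydantic v2 output model (the removed README example noted the output model should only contain a `result` key; the prompt text, model shape, and model name here are placeholders):

```python
import asyncio

from openai import AsyncOpenAI
from pydantic import BaseModel

from texttools import AsyncTheTool

# Placeholder output model; run_custom forwards its JSON schema
# to the prompt via output_model.model_json_schema()
class Custom(BaseModel):
    result: list[str]

async def main():
    async_client = AsyncOpenAI(base_url="your_url", api_key="your_api_key")
    the_tool = AsyncTheTool(client=async_client, model="gpt-4o-mini")

    custom = await the_tool.run_custom(
        "List the colors mentioned in: 'red sky, blue sea'",  # placeholder prompt
        output_model=Custom,
    )
    print(custom["result"])

asyncio.run(main())
```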