hamtaa-texttools 1.1.1__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. hamtaa_texttools-1.2.0.dist-info/METADATA +212 -0
  2. hamtaa_texttools-1.2.0.dist-info/RECORD +34 -0
  3. texttools/__init__.py +6 -8
  4. texttools/batch/__init__.py +0 -4
  5. texttools/batch/config.py +40 -0
  6. texttools/batch/{batch_manager.py → manager.py} +41 -42
  7. texttools/batch/runner.py +228 -0
  8. texttools/core/__init__.py +0 -0
  9. texttools/core/engine.py +254 -0
  10. texttools/core/exceptions.py +22 -0
  11. texttools/core/internal_models.py +58 -0
  12. texttools/core/operators/async_operator.py +194 -0
  13. texttools/core/operators/sync_operator.py +192 -0
  14. texttools/models.py +88 -0
  15. texttools/prompts/categorize.yaml +36 -0
  16. texttools/prompts/check_fact.yaml +24 -0
  17. texttools/prompts/extract_entities.yaml +7 -3
  18. texttools/prompts/extract_keywords.yaml +80 -18
  19. texttools/prompts/is_question.yaml +6 -2
  20. texttools/prompts/merge_questions.yaml +12 -5
  21. texttools/prompts/propositionize.yaml +24 -0
  22. texttools/prompts/rewrite.yaml +9 -10
  23. texttools/prompts/run_custom.yaml +2 -2
  24. texttools/prompts/subject_to_question.yaml +7 -3
  25. texttools/prompts/summarize.yaml +6 -2
  26. texttools/prompts/text_to_question.yaml +12 -6
  27. texttools/prompts/translate.yaml +7 -2
  28. texttools/py.typed +0 -0
  29. texttools/tools/__init__.py +0 -4
  30. texttools/tools/async_tools.py +1093 -0
  31. texttools/tools/sync_tools.py +1092 -0
  32. hamtaa_texttools-1.1.1.dist-info/METADATA +0 -183
  33. hamtaa_texttools-1.1.1.dist-info/RECORD +0 -30
  34. texttools/batch/batch_runner.py +0 -263
  35. texttools/prompts/README.md +0 -35
  36. texttools/prompts/categorizer.yaml +0 -28
  37. texttools/tools/async_the_tool.py +0 -414
  38. texttools/tools/internals/async_operator.py +0 -179
  39. texttools/tools/internals/base_operator.py +0 -91
  40. texttools/tools/internals/formatters.py +0 -24
  41. texttools/tools/internals/operator.py +0 -179
  42. texttools/tools/internals/output_models.py +0 -59
  43. texttools/tools/internals/prompt_loader.py +0 -57
  44. texttools/tools/the_tool.py +0 -412
  45. {hamtaa_texttools-1.1.1.dist-info → hamtaa_texttools-1.2.0.dist-info}/WHEEL +0 -0
  46. {hamtaa_texttools-1.1.1.dist-info → hamtaa_texttools-1.2.0.dist-info}/licenses/LICENSE +0 -0
  47. {hamtaa_texttools-1.1.1.dist-info → hamtaa_texttools-1.2.0.dist-info}/top_level.txt +0 -0
@@ -1,179 +0,0 @@
1
- from typing import Any, TypeVar, Type, Literal
2
- import logging
3
-
4
- from openai import OpenAI
5
- from pydantic import BaseModel
6
-
7
- from texttools.tools.internals.output_models import ToolOutput
8
- from texttools.tools.internals.base_operator import BaseOperator
9
- from texttools.tools.internals.formatters import Formatter
10
- from texttools.tools.internals.prompt_loader import PromptLoader
11
-
12
# Type variable bound to pydantic BaseModel: tools pass a concrete
# structured-output schema (e.g. StrOutput, BoolOutput) as `Type[T]`.
T = TypeVar("T", bound=BaseModel)

# Module-level logger for the sync operator; INFO level so run() failures
# and schema problems are visible without enabling debug logging.
logger = logging.getLogger("operator")
logger.setLevel(logging.INFO)
18
-
19
-
20
class Operator(BaseOperator):
    """
    Core engine for running text-processing operations with an LLM (Sync).

    It wires together:
    - `PromptLoader` → loads YAML prompt templates.
    - `Formatter` → applies formatting to messages (e.g., merging).
    - OpenAI client → executes completions/parsed completions.
    """

    def __init__(self, client: OpenAI, model: str):
        self.client = client
        self.model = model

    def _analyze(self, prompt_configs: dict[str, str], temperature: float) -> str:
        """Run the analysis pre-pass and return the model's free-text analysis."""
        analyze_prompt = prompt_configs["analyze_template"]
        analyze_message = [self._build_user_message(analyze_prompt)]
        completion = self.client.chat.completions.create(
            model=self.model,
            messages=analyze_message,
            temperature=temperature,
        )
        analysis = completion.choices[0].message.content.strip()
        return analysis

    def _parse_completion(
        self,
        message: list[dict[str, str]],
        output_model: Type[T],
        temperature: float,
        logprobs: bool = False,
        top_logprobs: int = 3,
    ) -> tuple[T, Any]:
        """
        Structured completion via the OpenAI `parse` API.

        Returns:
            (parsed, completion): the parsed `output_model` *instance* (the
            original annotation claimed `Type[T]`, i.e. the class) and the raw
            completion, kept for later logprob extraction.
        """
        request_kwargs = {
            "model": self.model,
            "messages": message,
            "response_format": output_model,
            "temperature": temperature,
        }

        if logprobs:
            request_kwargs["logprobs"] = True
            request_kwargs["top_logprobs"] = top_logprobs

        completion = self.client.beta.chat.completions.parse(**request_kwargs)
        parsed = completion.choices[0].message.parsed
        return parsed, completion

    def _vllm_completion(
        self,
        message: list[dict[str, str]],
        output_model: Type[T],
        temperature: float,
        logprobs: bool = False,
        top_logprobs: int = 3,
    ) -> tuple[T, Any]:
        """
        Structured completion for vLLM-style backends using guided JSON
        decoding (`extra_body={"guided_json": ...}`), then parses the raw
        string content back into `output_model`.
        """
        json_schema = output_model.model_json_schema()

        # Build kwargs dynamically
        request_kwargs = {
            "model": self.model,
            "messages": message,
            "extra_body": {"guided_json": json_schema},
            "temperature": temperature,
        }

        if logprobs:
            request_kwargs["logprobs"] = True
            request_kwargs["top_logprobs"] = top_logprobs

        completion = self.client.chat.completions.create(**request_kwargs)
        response = completion.choices[0].message.content

        # Convert the string response to output model
        parsed = self._convert_to_output_model(response, output_model)
        return parsed, completion

    def run(
        self,
        # User parameters
        text: str,
        with_analysis: bool,
        output_lang: str | None,
        user_prompt: str | None,
        temperature: float,
        logprobs: bool,
        top_logprobs: int | None,
        # Internal parameters
        prompt_file: str,
        output_model: Type[T],
        resp_format: Literal["vllm", "parse"],
        mode: str | None,
        **extra_kwargs,
    ) -> ToolOutput:
        """
        Execute the LLM pipeline with the given input text.

        Returns:
            ToolOutput: result (and optional analysis/logprobs). On any
            failure the exception message is captured in `errors` and an
            otherwise-empty ToolOutput is returned instead of raising.
        """
        prompt_loader = PromptLoader()
        formatter = Formatter()

        try:
            prompt_configs = prompt_loader.load(
                prompt_file=prompt_file,
                text=text.strip(),
                mode=mode,
                **extra_kwargs,
            )

            messages: list[dict[str, str]] = []

            if with_analysis:
                analysis = self._analyze(prompt_configs, temperature)
                messages.append(
                    self._build_user_message(f"Based on this analysis: {analysis}")
                )

            if output_lang:
                messages.append(
                    self._build_user_message(
                        f"Respond only in the {output_lang} language."
                    )
                )

            if user_prompt:
                messages.append(
                    self._build_user_message(f"Consider this instruction {user_prompt}")
                )

            messages.append(self._build_user_message(prompt_configs["main_template"]))
            messages = formatter.user_merge_format(messages)

            if resp_format == "vllm":
                parsed, completion = self._vllm_completion(
                    messages, output_model, temperature, logprobs, top_logprobs
                )
            elif resp_format == "parse":
                parsed, completion = self._parse_completion(
                    messages, output_model, temperature, logprobs, top_logprobs
                )
            else:
                # Fail fast: the original fell through here with `parsed`
                # unbound and surfaced a confusing NameError instead.
                raise ValueError(f"Unsupported resp_format: {resp_format!r}")

            # Ensure output_model has a `result` field before accessing it, so
            # the error captured below is explicit rather than a bare
            # AttributeError raised by the assignment a few lines later.
            if not hasattr(parsed, "result"):
                logger.error(
                    "The provided output_model must define a field named 'result'"
                )
                raise AttributeError(
                    "The provided output_model must define a field named 'result'"
                )

            output = ToolOutput(result="", analysis="", logprobs=[], errors=[])

            output.result = parsed.result

            if logprobs:
                output.logprobs = self._extract_logprobs(completion)

            if with_analysis:
                output.analysis = analysis

            return output
        except Exception as e:
            logger.error(f"TheTool failed: {e}")
            return ToolOutput(result="", analysis="", logprobs=[], errors=[str(e)])
@@ -1,59 +0,0 @@
1
- from typing import Literal, Any
2
-
3
- from pydantic import BaseModel, Field
4
-
5
-
6
class ToolOutput(BaseModel):
    """Uniform envelope returned by every tool run."""

    result: str  # primary output; empty string when the run failed
    analysis: str  # analysis pre-pass text; empty unless with_analysis was enabled
    logprobs: list[dict[str, Any]]  # token log-probability data; empty unless requested
    errors: list[str]  # captured error messages; empty on success
11
-
12
-
13
class StrOutput(BaseModel):
    """Structured-output schema for tools returning a single string."""

    result: str = Field(..., description="The output string")
15
-
16
-
17
class BoolOutput(BaseModel):
    """Structured-output schema for yes/no style tools (e.g. is_question)."""

    result: bool = Field(
        ..., description="Boolean indicating the output state", example=True
    )
21
-
22
-
23
class ListStrOutput(BaseModel):
    """Structured-output schema for tools returning a list of strings."""

    result: list[str] = Field(
        ..., description="The output list of strings", example=["text_1", "text_2"]
    )
27
-
28
-
29
class ListDictStrStrOutput(BaseModel):
    """Structured-output schema for entity-style results (list of str→str dicts)."""

    result: list[dict[str, str]] = Field(
        ...,
        description="List of dictionaries containing string key-value pairs",
        example=[{"text": "Mohammad", "type": "PER"}],
    )
35
-
36
-
37
class ReasonListStrOutput(BaseModel):
    """List-of-strings schema with an explicit chain-of-reasoning field."""

    reason: str = Field(..., description="Thinking process that led to the output")
    result: list[str] = Field(..., description="The output list of strings")
40
-
41
-
42
class CategorizerOutput(BaseModel):
    """Classification schema with reasoning; labels are a closed set of
    Persian Islamic-studies categories (last label means "none of these")."""

    reason: str = Field(
        ..., description="Explanation of why the input belongs to the category"
    )
    # NOTE: the Literal values are runtime data consumed by the LLM's
    # structured-output schema — they must stay exactly as written.
    result: Literal[
        "باورهای دینی",
        "اخلاق اسلامی",
        "احکام و فقه",
        "تاریخ اسلام و شخصیت ها",
        "منابع دینی",
        "دین و جامعه/سیاست",
        "عرفان و معنویت",
        "هیچکدام",
    ] = Field(
        ...,
        description="Predicted category label",
        example="اخلاق اسلامی",
    )
@@ -1,57 +0,0 @@
1
- from functools import lru_cache
2
- from pathlib import Path
3
- import yaml
4
-
5
-
6
- class PromptLoader:
7
- """
8
- Utility for loading and formatting YAML prompt templates.
9
-
10
- Responsibilities:
11
- - Load and parse YAML prompt definitions.
12
- - Select the right template (by mode, if applicable).
13
- - Inject variables (`{input}`, plus any extra kwargs) into the templates.
14
- - Return a dict with:
15
- {
16
- "main_template": "...",
17
- "analyze_template": "..." | None
18
- }
19
- """
20
-
21
- MAIN_TEMPLATE: str = "main_template"
22
- ANALYZE_TEMPLATE: str = "analyze_template"
23
-
24
- # Use lru_cache to load each file once
25
- @lru_cache(maxsize=32)
26
- def _load_templates(self, prompt_file: str, mode: str | None) -> dict[str, str]:
27
- base_dir = Path(__file__).parent.parent.parent / Path("prompts")
28
- prompt_path = base_dir / prompt_file
29
- data = yaml.safe_load(prompt_path.read_text(encoding="utf-8"))
30
-
31
- return {
32
- self.MAIN_TEMPLATE: data[self.MAIN_TEMPLATE][mode]
33
- if mode
34
- else data[self.MAIN_TEMPLATE],
35
- self.ANALYZE_TEMPLATE: data.get(self.ANALYZE_TEMPLATE)[mode]
36
- if mode
37
- else data.get(self.ANALYZE_TEMPLATE),
38
- }
39
-
40
- def _build_format_args(self, text: str, **extra_kwargs) -> dict[str, str]:
41
- # Base formatting args
42
- format_args = {"input": text}
43
- # Merge extras
44
- format_args.update(extra_kwargs)
45
- return format_args
46
-
47
- def load(
48
- self, prompt_file: str, text: str, mode: str, **extra_kwargs
49
- ) -> dict[str, str]:
50
- template_configs = self._load_templates(prompt_file, mode)
51
- format_args = self._build_format_args(text, **extra_kwargs)
52
-
53
- # Inject variables inside each template
54
- for key in template_configs.keys():
55
- template_configs[key] = template_configs[key].format(**format_args)
56
-
57
- return template_configs
@@ -1,412 +0,0 @@
1
- from typing import Literal, Any
2
-
3
- from openai import OpenAI
4
-
5
- from texttools.tools.internals.operator import Operator
6
- import texttools.tools.internals.output_models as OutputModels
7
-
8
-
9
class TheTool:
    """
    High-level sync facade over `Operator`.

    Each method configures the operator with a specific YAML prompt,
    output schema, and flags, then delegates execution to `operator.run()`.
    All methods return a `ToolOutput` (the original return annotations of
    `dict[...]` were incorrect and are fixed here).

    Usage:
        client = OpenAI(...)
        tool = TheTool(client, model="model-name")
        result = tool.categorize("text ...", with_analysis=True)
    """

    def __init__(
        self,
        client: OpenAI,
        model: str,
    ):
        self.operator = Operator(client=client, model=model)

    def categorize(
        self,
        text: str,
        with_analysis: bool = False,
        user_prompt: str | None = None,
        temperature: float | None = 0.0,
        logprobs: bool = False,
        top_logprobs: int | None = None,
    ) -> OutputModels.ToolOutput:
        """
        Categorize a text into a single Islamic studies domain category.

        Returns:
            ToolOutput: Object containing:
                - result (str): The assigned Islamic studies category
                - logprobs (list | None): Probability data if logprobs enabled
                - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
        return self.operator.run(
            # User parameters
            text=text,
            with_analysis=with_analysis,
            user_prompt=user_prompt,
            temperature=temperature,
            logprobs=logprobs,
            top_logprobs=top_logprobs,
            # Internal parameters
            prompt_file="categorizer.yaml",
            output_model=OutputModels.CategorizerOutput,
            resp_format="parse",
            mode=None,
            output_lang=None,
        )

    def extract_keywords(
        self,
        text: str,
        with_analysis: bool = False,
        output_lang: str | None = None,
        user_prompt: str | None = None,
        temperature: float | None = 0.0,
        logprobs: bool = False,
        top_logprobs: int | None = None,
    ) -> OutputModels.ToolOutput:
        """
        Extract salient keywords from text.

        Returns:
            ToolOutput: Object containing:
                - result (list[str]): List of extracted keywords
                - logprobs (list | None): Probability data if logprobs enabled
                - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
        return self.operator.run(
            # User parameters
            text=text,
            with_analysis=with_analysis,
            output_lang=output_lang,
            user_prompt=user_prompt,
            temperature=temperature,
            logprobs=logprobs,
            top_logprobs=top_logprobs,
            # Internal parameters
            prompt_file="extract_keywords.yaml",
            output_model=OutputModels.ListStrOutput,
            resp_format="parse",
            mode=None,
        )

    def extract_entities(
        self,
        text: str,
        with_analysis: bool = False,
        output_lang: str | None = None,
        user_prompt: str | None = None,
        temperature: float | None = 0.0,
        logprobs: bool = False,
        top_logprobs: int | None = None,
    ) -> OutputModels.ToolOutput:
        """
        Perform Named Entity Recognition (NER) over the input text.

        Returns:
            ToolOutput: Object containing:
                - result (list[dict]): List of entities with 'text' and 'type' keys
                - logprobs (list | None): Probability data if logprobs enabled
                - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
        return self.operator.run(
            # User parameters
            text=text,
            with_analysis=with_analysis,
            output_lang=output_lang,
            user_prompt=user_prompt,
            temperature=temperature,
            logprobs=logprobs,
            top_logprobs=top_logprobs,
            # Internal parameters
            prompt_file="extract_entities.yaml",
            output_model=OutputModels.ListDictStrStrOutput,
            resp_format="parse",
            mode=None,
        )

    def is_question(
        self,
        text: str,
        with_analysis: bool = False,
        user_prompt: str | None = None,
        temperature: float | None = 0.0,
        logprobs: bool = False,
        top_logprobs: int | None = None,
    ) -> OutputModels.ToolOutput:
        """
        Detect if the input is phrased as a question.

        Returns:
            ToolOutput: Object containing:
                - result (bool): True if text is a question, False otherwise
                - logprobs (list | None): Probability data if logprobs enabled
                - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
        return self.operator.run(
            # User parameters
            text=text,
            with_analysis=with_analysis,
            user_prompt=user_prompt,
            temperature=temperature,
            logprobs=logprobs,
            top_logprobs=top_logprobs,
            # Internal parameters
            prompt_file="is_question.yaml",
            output_model=OutputModels.BoolOutput,
            resp_format="parse",
            mode=None,
            output_lang=None,
        )

    def text_to_question(
        self,
        text: str,
        with_analysis: bool = False,
        output_lang: str | None = None,
        user_prompt: str | None = None,
        temperature: float | None = 0.0,
        logprobs: bool = False,
        top_logprobs: int | None = None,
    ) -> OutputModels.ToolOutput:
        """
        Generate a single question from the given text.

        Returns:
            ToolOutput: Object containing:
                - result (str): The generated question
                - logprobs (list | None): Probability data if logprobs enabled
                - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
        return self.operator.run(
            # User parameters
            text=text,
            with_analysis=with_analysis,
            output_lang=output_lang,
            user_prompt=user_prompt,
            temperature=temperature,
            logprobs=logprobs,
            top_logprobs=top_logprobs,
            # Internal parameters
            prompt_file="text_to_question.yaml",
            output_model=OutputModels.StrOutput,
            resp_format="parse",
            mode=None,
        )

    def merge_questions(
        self,
        text: list[str],
        with_analysis: bool = False,
        output_lang: str | None = None,
        user_prompt: str | None = None,
        temperature: float | None = 0.0,
        logprobs: bool = False,
        top_logprobs: int | None = None,
        mode: Literal["default", "reason"] = "default",
    ) -> OutputModels.ToolOutput:
        """
        Merge multiple questions into a single unified question.

        Returns:
            ToolOutput: Object containing:
                - result (str): The merged question
                - logprobs (list | None): Probability data if logprobs enabled
                - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
        # The prompt expects one string, so the question list is joined first.
        text = ", ".join(text)
        return self.operator.run(
            # User parameters
            text=text,
            with_analysis=with_analysis,
            output_lang=output_lang,
            user_prompt=user_prompt,
            temperature=temperature,
            logprobs=logprobs,
            top_logprobs=top_logprobs,
            # Internal parameters
            prompt_file="merge_questions.yaml",
            output_model=OutputModels.StrOutput,
            resp_format="parse",
            mode=mode,
        )

    def rewrite(
        self,
        text: str,
        with_analysis: bool = False,
        output_lang: str | None = None,
        user_prompt: str | None = None,
        temperature: float | None = 0.0,
        logprobs: bool = False,
        top_logprobs: int | None = None,
        mode: Literal["positive", "negative", "hard_negative"] = "positive",
    ) -> OutputModels.ToolOutput:
        """
        Rewrite a text with different modes.

        Returns:
            ToolOutput: Object containing:
                - result (str): The rewritten text
                - logprobs (list | None): Probability data if logprobs enabled
                - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
        return self.operator.run(
            # User parameters
            text=text,
            with_analysis=with_analysis,
            output_lang=output_lang,
            user_prompt=user_prompt,
            temperature=temperature,
            logprobs=logprobs,
            top_logprobs=top_logprobs,
            # Internal parameters
            prompt_file="rewrite.yaml",
            output_model=OutputModels.StrOutput,
            resp_format="parse",
            mode=mode,
        )

    def subject_to_question(
        self,
        text: str,
        number_of_questions: int,
        with_analysis: bool = False,
        output_lang: str | None = None,
        user_prompt: str | None = None,
        temperature: float | None = 0.0,
        logprobs: bool = False,
        top_logprobs: int | None = None,
    ) -> OutputModels.ToolOutput:
        """
        Generate a list of questions about a subject.

        Returns:
            ToolOutput: Object containing:
                - result (list[str]): List of generated questions
                - logprobs (list | None): Probability data if logprobs enabled
                - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
        return self.operator.run(
            # User parameters
            text=text,
            number_of_questions=number_of_questions,
            with_analysis=with_analysis,
            output_lang=output_lang,
            user_prompt=user_prompt,
            temperature=temperature,
            logprobs=logprobs,
            top_logprobs=top_logprobs,
            # Internal parameters
            prompt_file="subject_to_question.yaml",
            output_model=OutputModels.ReasonListStrOutput,
            resp_format="parse",
            mode=None,
        )

    def summarize(
        self,
        text: str,
        with_analysis: bool = False,
        output_lang: str | None = None,
        user_prompt: str | None = None,
        temperature: float | None = 0.0,
        logprobs: bool = False,
        top_logprobs: int | None = None,
    ) -> OutputModels.ToolOutput:
        """
        Summarize the given subject text.

        Returns:
            ToolOutput: Object containing:
                - result (str): The summary text
                - logprobs (list | None): Probability data if logprobs enabled
                - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
        return self.operator.run(
            # User parameters
            text=text,
            with_analysis=with_analysis,
            output_lang=output_lang,
            user_prompt=user_prompt,
            temperature=temperature,
            logprobs=logprobs,
            top_logprobs=top_logprobs,
            # Internal parameters
            prompt_file="summarize.yaml",
            output_model=OutputModels.StrOutput,
            resp_format="parse",
            mode=None,
        )

    def translate(
        self,
        text: str,
        target_language: str,
        with_analysis: bool = False,
        user_prompt: str | None = None,
        temperature: float | None = 0.0,
        logprobs: bool = False,
        top_logprobs: int | None = None,
    ) -> OutputModels.ToolOutput:
        """
        Translate text between languages.

        Returns:
            ToolOutput: Object containing:
                - result (str): The translated text
                - logprobs (list | None): Probability data if logprobs enabled
                - analysis (str | None): Detailed reasoning if with_analysis enabled
        """
        return self.operator.run(
            # User parameters
            text=text,
            target_language=target_language,
            with_analysis=with_analysis,
            user_prompt=user_prompt,
            temperature=temperature,
            logprobs=logprobs,
            top_logprobs=top_logprobs,
            # Internal parameters
            prompt_file="translate.yaml",
            output_model=OutputModels.StrOutput,
            resp_format="parse",
            mode=None,
            output_lang=None,
        )

    def run_custom(
        self,
        prompt: str,
        output_model: Any,
        output_lang: str | None = None,
        temperature: float | None = None,
        logprobs: bool | None = None,
        top_logprobs: int | None = None,
    ) -> OutputModels.ToolOutput:
        """
        Custom tool that can do almost anything!

        Returns:
            ToolOutput: Object with fields:
                - result (str): The output result
        """
        return self.operator.run(
            # User parameters
            text=prompt,
            output_model=output_model,
            output_model_str=output_model.model_json_schema(),
            output_lang=output_lang,
            temperature=temperature,
            logprobs=logprobs,
            top_logprobs=top_logprobs,
            # Internal parameters
            prompt_file="run_custom.yaml",
            resp_format="parse",
            user_prompt=None,
            with_analysis=False,
            mode=None,
        )