hamtaa-texttools 1.0.9__tar.gz → 1.1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of hamtaa-texttools might be problematic.

Files changed (36)
  1. {hamtaa_texttools-1.0.9/hamtaa_texttools.egg-info → hamtaa_texttools-1.1.1}/PKG-INFO +9 -9
  2. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/README.md +7 -7
  3. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1/hamtaa_texttools.egg-info}/PKG-INFO +9 -9
  4. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/hamtaa_texttools.egg-info/requires.txt +1 -1
  5. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/pyproject.toml +32 -32
  6. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/tools/async_the_tool.py +42 -11
  7. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/tools/internals/async_operator.py +10 -8
  8. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/tools/internals/operator.py +8 -6
  9. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/tools/internals/output_models.py +8 -1
  10. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/tools/the_tool.py +42 -11
  11. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/LICENSE +0 -0
  12. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/MANIFEST.in +0 -0
  13. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/hamtaa_texttools.egg-info/SOURCES.txt +0 -0
  14. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/hamtaa_texttools.egg-info/dependency_links.txt +0 -0
  15. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/hamtaa_texttools.egg-info/top_level.txt +0 -0
  16. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/setup.cfg +0 -0
  17. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/__init__.py +0 -0
  18. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/batch/__init__.py +0 -0
  19. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/batch/batch_manager.py +0 -0
  20. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/batch/batch_runner.py +0 -0
  21. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/prompts/README.md +0 -0
  22. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/prompts/categorizer.yaml +0 -0
  23. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/prompts/extract_entities.yaml +0 -0
  24. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/prompts/extract_keywords.yaml +0 -0
  25. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/prompts/is_question.yaml +0 -0
  26. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/prompts/merge_questions.yaml +0 -0
  27. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/prompts/rewrite.yaml +0 -0
  28. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/prompts/run_custom.yaml +0 -0
  29. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/prompts/subject_to_question.yaml +0 -0
  30. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/prompts/summarize.yaml +0 -0
  31. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/prompts/text_to_question.yaml +0 -0
  32. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/prompts/translate.yaml +0 -0
  33. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/tools/__init__.py +0 -0
  34. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/tools/internals/base_operator.py +0 -0
  35. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/tools/internals/formatters.py +0 -0
  36. {hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/tools/internals/prompt_loader.py +0 -0
{hamtaa_texttools-1.0.9/hamtaa_texttools.egg-info → hamtaa_texttools-1.1.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: hamtaa-texttools
- Version: 1.0.9
+ Version: 1.1.1
  Summary: A high-level NLP toolkit built on top of modern LLMs.
  Author-email: Tohidi <the.mohammad.tohidi@gmail.com>, Montazer <montazerh82@gmail.com>, Givechi <mohamad.m.givechi@gmail.com>, MoosaviNejad <erfanmoosavi84@gmail.com>
  License: MIT License
@@ -29,7 +29,7 @@ Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: openai==1.97.1
- Requires-Dist: PyYAML>=6.0
+ Requires-Dist: pyyaml>=6.0
  Dynamic: license-file

  # TextTools
@@ -63,7 +63,7 @@ Each tool is designed to work out-of-the-box with structured outputs (JSON / Pyd

  ---

- ## ⚙️ `with_analysis`, `logprobs`, `output_lang`, and `user_prompt` parameters
+ ## ⚙️ `with_analysis`, `logprobs`, `output_lang`, `user_prompt` and `temperature` parameters

  TextTools provides several optional flags to customize LLM behavior:

@@ -119,14 +119,14 @@ the_tool = TheTool(client=client, model=model)

  # Example: Question Detection
  detection = the_tool.is_question("Is this project open source?", logprobs=True, top_logprobs=2)
- print(detection["result"])
- print(detection["logprobs"])
+ print(detection.result)
+ print(detection.logprobs)
  # Output: True \n --logprobs

  # Example: Translation
  translation = the_tool.translate("سلام، حالت چطوره؟" target_language="English", with_analysis=True)
- print(translation["result"])
- print(translation["analysis"])
+ print(translation.result)
+ print(translation.analysis)
  # Output: "Hi! How are you?" \n --analysis
  ```

@@ -150,8 +150,8 @@ async def main():
  the_tool = AsyncTheTool(client=async_client, model=model)

  # Example: Async Translation
- result = await the_tool.translate("سلام، حالت چطوره؟", target_language="English")
- print(result["result"])
+ translation = await the_tool.translate("سلام، حالت چطوره؟", target_language="English")
+ print(translation.result)
  # Output: "Hi! How are you?"

  asyncio.run(main())
{hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/README.md
@@ -29,7 +29,7 @@ Each tool is designed to work out-of-the-box with structured outputs (JSON / Pyd

  ---

- ## ⚙️ `with_analysis`, `logprobs`, `output_lang`, and `user_prompt` parameters
+ ## ⚙️ `with_analysis`, `logprobs`, `output_lang`, `user_prompt` and `temperature` parameters

  TextTools provides several optional flags to customize LLM behavior:

@@ -85,14 +85,14 @@ the_tool = TheTool(client=client, model=model)

  # Example: Question Detection
  detection = the_tool.is_question("Is this project open source?", logprobs=True, top_logprobs=2)
- print(detection["result"])
- print(detection["logprobs"])
+ print(detection.result)
+ print(detection.logprobs)
  # Output: True \n --logprobs

  # Example: Translation
  translation = the_tool.translate("سلام، حالت چطوره؟" target_language="English", with_analysis=True)
- print(translation["result"])
- print(translation["analysis"])
+ print(translation.result)
+ print(translation.analysis)
  # Output: "Hi! How are you?" \n --analysis
  ```

@@ -116,8 +116,8 @@ async def main():
  the_tool = AsyncTheTool(client=async_client, model=model)

  # Example: Async Translation
- result = await the_tool.translate("سلام، حالت چطوره؟", target_language="English")
- print(result["result"])
+ translation = await the_tool.translate("سلام، حالت چطوره؟", target_language="English")
+ print(translation.result)
  # Output: "Hi! How are you?"

  asyncio.run(main())
{hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1/hamtaa_texttools.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: hamtaa-texttools
- Version: 1.0.9
+ Version: 1.1.1
  Summary: A high-level NLP toolkit built on top of modern LLMs.
  Author-email: Tohidi <the.mohammad.tohidi@gmail.com>, Montazer <montazerh82@gmail.com>, Givechi <mohamad.m.givechi@gmail.com>, MoosaviNejad <erfanmoosavi84@gmail.com>
  License: MIT License
@@ -29,7 +29,7 @@ Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: openai==1.97.1
- Requires-Dist: PyYAML>=6.0
+ Requires-Dist: pyyaml>=6.0
  Dynamic: license-file

  # TextTools
@@ -63,7 +63,7 @@ Each tool is designed to work out-of-the-box with structured outputs (JSON / Pyd

  ---

- ## ⚙️ `with_analysis`, `logprobs`, `output_lang`, and `user_prompt` parameters
+ ## ⚙️ `with_analysis`, `logprobs`, `output_lang`, `user_prompt` and `temperature` parameters

  TextTools provides several optional flags to customize LLM behavior:

@@ -119,14 +119,14 @@ the_tool = TheTool(client=client, model=model)

  # Example: Question Detection
  detection = the_tool.is_question("Is this project open source?", logprobs=True, top_logprobs=2)
- print(detection["result"])
- print(detection["logprobs"])
+ print(detection.result)
+ print(detection.logprobs)
  # Output: True \n --logprobs

  # Example: Translation
  translation = the_tool.translate("سلام، حالت چطوره؟" target_language="English", with_analysis=True)
- print(translation["result"])
- print(translation["analysis"])
+ print(translation.result)
+ print(translation.analysis)
  # Output: "Hi! How are you?" \n --analysis
  ```

@@ -150,8 +150,8 @@ async def main():
  the_tool = AsyncTheTool(client=async_client, model=model)

  # Example: Async Translation
- result = await the_tool.translate("سلام، حالت چطوره؟", target_language="English")
- print(result["result"])
+ translation = await the_tool.translate("سلام، حالت چطوره؟", target_language="English")
+ print(translation.result)
  # Output: "Hi! How are you?"

  asyncio.run(main())
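All three copies of this documentation switch the examples from dict indexing (detection["result"]) to attribute access (detection.result). A minimal sync sketch of the 1.1.1 pattern, assuming an OpenAI-compatible endpoint (the base_url, api_key, model name, and the `from texttools import TheTool` import path are illustrative placeholders, not taken from this diff), with the comma added after the Persian string that the README snippet appears to omit:

from openai import OpenAI
from texttools import TheTool  # assumed import path; adjust to the package's actual layout

# Placeholder client setup: point this at whatever OpenAI-compatible server you use.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
the_tool = TheTool(client=client, model="your-model-name")

# 1.1.1 returns ToolOutput objects, so fields are attributes rather than dict keys.
detection = the_tool.is_question("Is this project open source?", logprobs=True, top_logprobs=2)
print(detection.result)    # True / False
print(detection.logprobs)  # populated because logprobs=True

translation = the_tool.translate("سلام، حالت چطوره؟", target_language="English", with_analysis=True)
print(translation.result)    # "Hi! How are you?"
print(translation.analysis)  # populated because with_analysis=True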
{hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/hamtaa_texttools.egg-info/requires.txt
@@ -1,2 +1,2 @@
  openai==1.97.1
- PyYAML>=6.0
+ pyyaml>=6.0
{hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/pyproject.toml
@@ -1,32 +1,32 @@
- [build-system]
- requires = ["setuptools>=61.0", "wheel"]
- build-backend = "setuptools.build_meta"
-
- [project]
- name = "hamtaa-texttools"
- version = "1.0.9"
- authors = [
- { name = "Tohidi", email = "the.mohammad.tohidi@gmail.com" },
- { name = "Montazer", email = "montazerh82@gmail.com" },
- { name = "Givechi", email = "mohamad.m.givechi@gmail.com" },
- { name = "MoosaviNejad", email = "erfanmoosavi84@gmail.com" },
- ]
- description = "A high-level NLP toolkit built on top of modern LLMs."
- readme = "README.md"
- license = {file = "LICENSE"}
- requires-python = ">=3.8"
- dependencies = [
- "openai==1.97.1",
- "PyYAML>=6.0",
- ]
- keywords = ["nlp", "llm", "text-processing", "openai"]
-
- [tool.setuptools.packages.find]
- where = ["."]
- include = ["texttools*"]
-
- [tool.setuptools]
- include-package-data = true
-
- [tool.setuptools.package-data]
- "texttools" = ["prompts/*.yaml", "prompts/*.yml"]
+ [build-system]
+ requires = ["setuptools>=61.0", "wheel"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "hamtaa-texttools"
+ version = "1.1.1"
+ authors = [
+ { name = "Tohidi", email = "the.mohammad.tohidi@gmail.com" },
+ { name = "Montazer", email = "montazerh82@gmail.com" },
+ { name = "Givechi", email = "mohamad.m.givechi@gmail.com" },
+ { name = "MoosaviNejad", email = "erfanmoosavi84@gmail.com" },
+ ]
+ description = "A high-level NLP toolkit built on top of modern LLMs."
+ readme = "README.md"
+ license = {file = "LICENSE"}
+ requires-python = ">=3.8"
+ dependencies = [
+ "openai==1.97.1",
+ "pyyaml>=6.0",
+ ]
+ keywords = ["nlp", "llm", "text-processing", "openai"]
+
+ [tool.setuptools.packages.find]
+ where = ["."]
+ include = ["texttools*"]
+
+ [tool.setuptools]
+ include-package-data = true
+
+ [tool.setuptools.package-data]
+ "texttools" = ["prompts/*.yaml", "prompts/*.yml"]
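Functionally, this pyproject.toml rewrite amounts to the version bump to 1.1.1 plus the PyYAML → pyyaml respelling; pip normalizes distribution names case-insensitively, so the dependency constraint itself is unchanged. A quick post-upgrade check, illustrative and standard-library only:

from importlib.metadata import version

# Confirm the installed release matches the 1.1.1 sdist described in this diff.
print(version("hamtaa-texttools"))  # expected: 1.1.1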
{hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/tools/async_the_tool.py
@@ -39,7 +39,10 @@ class AsyncTheTool:
  Categorize a text into a single Islamic studies domain category.

  Returns:
- {"result": <category string>} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (str): The assigned Islamic studies category
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return await self.operator.run(
  # User parameters
@@ -71,7 +74,10 @@ class AsyncTheTool:
  Extract salient keywords from text.

  Returns:
- {"result": [<keyword1>, <keyword2>, ...]} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (list[str]): List of extracted keywords
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return await self.operator.run(
  # User parameters
@@ -103,7 +109,10 @@ class AsyncTheTool:
  Perform Named Entity Recognition (NER) over the input text.

  Returns:
- {"result": [{"text": <entity>, "type": <entity_type>}, ...]} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (list[dict]): List of entities with 'text' and 'type' keys
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return await self.operator.run(
  # User parameters
@@ -134,7 +143,10 @@ class AsyncTheTool:
  Detect if the input is phrased as a question.

  Returns:
- {"result": True} or {"result": False} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (bool): True if text is a question, False otherwise
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return await self.operator.run(
  # User parameters
@@ -166,7 +178,10 @@ class AsyncTheTool:
  Generate a single question from the given text.

  Returns:
- {"result": <generated_question>} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (str): The generated question
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return await self.operator.run(
  # User parameters
@@ -199,7 +214,10 @@ class AsyncTheTool:
  Merge multiple questions into a single unified question.

  Returns:
- {"result": <merged_question>} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (str): The merged question
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  text = ", ".join(text)
  return await self.operator.run(
@@ -233,7 +251,10 @@ class AsyncTheTool:
  Rewrite a text with different modes.

  Returns:
- {"result": <rewritten_text>} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (str): The rewritten text
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return await self.operator.run(
  # User parameters
@@ -266,7 +287,10 @@ class AsyncTheTool:
  Generate a list of questions about a subject.

  Returns:
- {"result": [<question1>, <question2>, ...]} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (list[str]): List of generated questions
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return await self.operator.run(
  # User parameters
@@ -299,7 +323,10 @@ class AsyncTheTool:
  Summarize the given subject text.

  Returns:
- {"result": <summary>} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (str): The summary text
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return await self.operator.run(
  # User parameters
@@ -331,7 +358,10 @@ class AsyncTheTool:
  Translate text between languages.

  Returns:
- {"result": <translated_text>} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (str): The translated text
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return await self.operator.run(
  # User parameters
@@ -363,7 +393,8 @@ class AsyncTheTool:
  Custom tool that can do almost anything!

  Returns:
- {"result": <Any>}
+ ToolOutput: Object with fields:
+ - result (str): The output result
  """
  return await self.operator.run(
  # User paramaeters
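Every coroutine in AsyncTheTool now documents a ToolOutput return instead of a plain dict. A short async sketch of the corresponding call pattern, again with a placeholder client, model name, and import path that are assumptions rather than values from this diff:

import asyncio

from openai import AsyncOpenAI
from texttools import AsyncTheTool  # assumed import path


async def main():
    # Placeholder endpoint, key, and model: substitute your own.
    async_client = AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
    the_tool = AsyncTheTool(client=async_client, model="your-model-name")

    # The awaited value is a ToolOutput, so read .result / .analysis / .logprobs as attributes.
    translation = await the_tool.translate("سلام، حالت چطوره؟", target_language="English")
    print(translation.result)  # "Hi! How are you?"


asyncio.run(main())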
{hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/tools/internals/async_operator.py
@@ -4,6 +4,7 @@ import logging
  from openai import AsyncOpenAI
  from pydantic import BaseModel

+ from texttools.tools.internals.output_models import ToolOutput
  from texttools.tools.internals.base_operator import BaseOperator
  from texttools.tools.internals.formatters import Formatter
  from texttools.tools.internals.prompt_loader import PromptLoader
@@ -152,7 +153,7 @@ class AsyncOperator(BaseOperator):
  messages, output_model, temperature, logprobs, top_logprobs
  )
  elif resp_format == "parse":
- parsed, completion = await self._vllm_completion(
+ parsed, completion = await self._parse_completion(
  messages, output_model, temperature, logprobs, top_logprobs
  )

@@ -162,16 +163,17 @@ class AsyncOperator(BaseOperator):
  "The provided output_model must define a field named 'result'"
  )

- results = {"result": parsed.result}
+ output = ToolOutput(result="", analysis="", logprobs=[], errors=[])
+
+ output.result = parsed.result

  if logprobs:
- results["logprobs"] = self._extract_logprobs(completion)
+ output.logprobs = self._extract_logprobs(completion)

  if with_analysis:
- results["analysis"] = analysis
-
- return results
+ output.analysis = analysis

+ return output
  except Exception as e:
- logger.error(f"Async TheTool failed: {e}")
- return {"error": str(e), "result": ""}
+ logger.error(f"AsyncTheTool failed: {e}")
+ return ToolOutput(result="", analysis="", logprobs=[], errors=[str(e)])
{hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/tools/internals/operator.py
@@ -4,6 +4,7 @@ import logging
  from openai import OpenAI
  from pydantic import BaseModel

+ from texttools.tools.internals.output_models import ToolOutput
  from texttools.tools.internals.base_operator import BaseOperator
  from texttools.tools.internals.formatters import Formatter
  from texttools.tools.internals.prompt_loader import PromptLoader
@@ -162,16 +163,17 @@ class Operator(BaseOperator):
  "The provided output_model must define a field named 'result'"
  )

- result = {"result": parsed.result}
+ output = ToolOutput(result="", analysis="", logprobs=[], errors=[])
+
+ output.result = parsed.result

  if logprobs:
- result["logprobs"] = self._extract_logprobs(completion)
+ output.logprobs = self._extract_logprobs(completion)

  if with_analysis:
- result["analysis"] = analysis
-
- return result
+ output.analysis = analysis

+ return output
  except Exception as e:
  logger.error(f"TheTool failed: {e}")
- return {"error": str(e), "result": ""}
+ return ToolOutput(result="", analysis="", logprobs=[], errors=[str(e)])
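The Operator and AsyncOperator hunks above also change the failure path: instead of returning {"error": str(e), "result": ""}, a failed run now comes back as a ToolOutput carrying the exception message in its errors list (the model itself is added in output_models.py, shown next). A small caller-side sketch; the ToolOutput class here mirrors that definition, and the unwrap helper is illustrative, not part of the package:

from typing import Any

from pydantic import BaseModel


class ToolOutput(BaseModel):
    # Mirrors the model added in output_models.py below.
    result: str
    analysis: str
    logprobs: list[dict[str, Any]]
    errors: list[str]


def unwrap(output: ToolOutput) -> str:
    # In 1.1.1, failures surface via .errors rather than an "error" dict key.
    if output.errors:
        raise RuntimeError(f"TheTool call failed: {output.errors}")
    return output.result


ok = ToolOutput(result="Hi! How are you?", analysis="", logprobs=[], errors=[])
print(unwrap(ok))  # Hi! How are you?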
{hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/tools/internals/output_models.py
@@ -1,8 +1,15 @@
- from typing import Literal
+ from typing import Literal, Any

  from pydantic import BaseModel, Field


+ class ToolOutput(BaseModel):
+ result: str
+ analysis: str
+ logprobs: list[dict[str, Any]]
+ errors: list[str]
+
+
  class StrOutput(BaseModel):
  result: str = Field(..., description="The output string")

{hamtaa_texttools-1.0.9 → hamtaa_texttools-1.1.1}/texttools/tools/the_tool.py
@@ -37,7 +37,10 @@ class TheTool:
  Categorize a text into a single Islamic studies domain category.

  Returns:
- {"result": <category string>} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (str): The assigned Islamic studies category
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return self.operator.run(
  # User parameters
@@ -69,7 +72,10 @@ class TheTool:
  Extract salient keywords from text.

  Returns:
- {"result": [<keyword1>, <keyword2>, ...]} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (list[str]): List of extracted keywords
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return self.operator.run(
  # User parameters
@@ -101,7 +107,10 @@ class TheTool:
  Perform Named Entity Recognition (NER) over the input text.

  Returns:
- {"result": [{"text": <entity>, "type": <entity_type>}, ...]} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (list[dict]): List of entities with 'text' and 'type' keys
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return self.operator.run(
  # User parameters
@@ -132,7 +141,10 @@ class TheTool:
  Detect if the input is phrased as a question.

  Returns:
- {"result": True} or {"result": False} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (bool): True if text is a question, False otherwise
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return self.operator.run(
  # User parameters
@@ -164,7 +176,10 @@ class TheTool:
  Generate a single question from the given text.

  Returns:
- {"result": <generated_question>} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (str): The generated question
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return self.operator.run(
  # User parameters
@@ -197,7 +212,10 @@ class TheTool:
  Merge multiple questions into a single unified question.

  Returns:
- {"result": <merged_question>} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (str): The merged question
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  text = ", ".join(text)
  return self.operator.run(
@@ -231,7 +249,10 @@ class TheTool:
  Rewrite a text with different modes.

  Returns:
- {"result": <rewritten_text>} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (str): The rewritten text
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return self.operator.run(
  # User parameters
@@ -264,7 +285,10 @@ class TheTool:
  Generate a list of questions about a subject.

  Returns:
- {"result": [<question1>, <question2>, ...]} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (list[str]): List of generated questions
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return self.operator.run(
  # User parameters
@@ -297,7 +321,10 @@ class TheTool:
  Summarize the given subject text.

  Returns:
- {"result": <summary>} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (str): The summary text
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return self.operator.run(
  # User parameters
@@ -329,7 +356,10 @@ class TheTool:
  Translate text between languages.

  Returns:
- {"result": <translated_text>} + ("logprobs" and "analysis" if enabled)
+ ToolOutput: Object containing:
+ - result (str): The translated text
+ - logprobs (list | None): Probability data if logprobs enabled
+ - analysis (str | None): Detailed reasoning if with_analysis enabled
  """
  return self.operator.run(
  # User parameters
@@ -361,7 +391,8 @@ class TheTool:
  Custom tool that can do almost anything!

  Returns:
- {"result": <Any>}
+ ToolOutput: Object with fields:
+ - result (str): The output result
  """
  return self.operator.run(
  # User paramaeters