hamtaa-texttools 1.1.1__tar.gz → 1.1.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of hamtaa-texttools might be problematic.

Files changed (36)
  1. {hamtaa_texttools-1.1.1/hamtaa_texttools.egg-info → hamtaa_texttools-1.1.2}/PKG-INFO +1 -1
  2. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2/hamtaa_texttools.egg-info}/PKG-INFO +1 -1
  3. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/pyproject.toml +1 -1
  4. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/batch/batch_manager.py +0 -1
  5. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/batch/batch_runner.py +0 -1
  6. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/tools/async_the_tool.py +11 -11
  7. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/tools/internals/async_operator.py +21 -8
  8. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/tools/internals/base_operator.py +5 -8
  9. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/tools/internals/operator.py +22 -9
  10. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/tools/internals/prompt_loader.py +3 -0
  11. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/tools/the_tool.py +11 -11
  12. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/LICENSE +0 -0
  13. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/MANIFEST.in +0 -0
  14. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/README.md +0 -0
  15. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/hamtaa_texttools.egg-info/SOURCES.txt +0 -0
  16. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/hamtaa_texttools.egg-info/dependency_links.txt +0 -0
  17. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/hamtaa_texttools.egg-info/requires.txt +0 -0
  18. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/hamtaa_texttools.egg-info/top_level.txt +0 -0
  19. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/setup.cfg +0 -0
  20. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/__init__.py +0 -0
  21. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/batch/__init__.py +0 -0
  22. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/prompts/README.md +0 -0
  23. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/prompts/categorizer.yaml +0 -0
  24. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/prompts/extract_entities.yaml +0 -0
  25. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/prompts/extract_keywords.yaml +0 -0
  26. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/prompts/is_question.yaml +0 -0
  27. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/prompts/merge_questions.yaml +0 -0
  28. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/prompts/rewrite.yaml +0 -0
  29. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/prompts/run_custom.yaml +0 -0
  30. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/prompts/subject_to_question.yaml +0 -0
  31. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/prompts/summarize.yaml +0 -0
  32. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/prompts/text_to_question.yaml +0 -0
  33. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/prompts/translate.yaml +0 -0
  34. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/tools/__init__.py +0 -0
  35. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/tools/internals/formatters.py +0 -0
  36. {hamtaa_texttools-1.1.1 → hamtaa_texttools-1.1.2}/texttools/tools/internals/output_models.py +0 -0
--- hamtaa_texttools-1.1.1/hamtaa_texttools.egg-info/PKG-INFO
+++ hamtaa_texttools-1.1.2/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hamtaa-texttools
-Version: 1.1.1
+Version: 1.1.2
 Summary: A high-level NLP toolkit built on top of modern LLMs.
 Author-email: Tohidi <the.mohammad.tohidi@gmail.com>, Montazer <montazerh82@gmail.com>, Givechi <mohamad.m.givechi@gmail.com>, MoosaviNejad <erfanmoosavi84@gmail.com>
 License: MIT License
--- hamtaa_texttools-1.1.1/PKG-INFO
+++ hamtaa_texttools-1.1.2/hamtaa_texttools.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hamtaa-texttools
-Version: 1.1.1
+Version: 1.1.2
 Summary: A high-level NLP toolkit built on top of modern LLMs.
 Author-email: Tohidi <the.mohammad.tohidi@gmail.com>, Montazer <montazerh82@gmail.com>, Givechi <mohamad.m.givechi@gmail.com>, MoosaviNejad <erfanmoosavi84@gmail.com>
 License: MIT License
--- hamtaa_texttools-1.1.1/pyproject.toml
+++ hamtaa_texttools-1.1.2/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "hamtaa-texttools"
-version = "1.1.1"
+version = "1.1.2"
 authors = [
     { name = "Tohidi", email = "the.mohammad.tohidi@gmail.com" },
     { name = "Montazer", email = "montazerh82@gmail.com" },
--- hamtaa_texttools-1.1.1/texttools/batch/batch_manager.py
+++ hamtaa_texttools-1.1.2/texttools/batch/batch_manager.py
@@ -8,7 +8,6 @@ from pydantic import BaseModel
 from openai import OpenAI
 from openai.lib._pydantic import to_strict_json_schema
 
-# Configure logger
 logger = logging.getLogger("batch_runner")
 logger.setLevel(logging.INFO)
 
--- hamtaa_texttools-1.1.1/texttools/batch/batch_runner.py
+++ hamtaa_texttools-1.1.2/texttools/batch/batch_runner.py
@@ -12,7 +12,6 @@ from pydantic import BaseModel
 
 from texttools.batch import SimpleBatchManager
 
-# Configure logger
 logger = logging.getLogger("batch_runner")
 logger.setLevel(logging.INFO)
 
--- hamtaa_texttools-1.1.1/texttools/tools/async_the_tool.py
+++ hamtaa_texttools-1.1.2/texttools/tools/async_the_tool.py
@@ -34,7 +34,7 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-    ) -> dict[str, str]:
+    ) -> OutputModels.ToolOutput:
         """
         Categorize a text into a single Islamic studies domain category.
 
@@ -69,7 +69,7 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-    ) -> dict[str, list[str]]:
+    ) -> OutputModels.ToolOutput:
         """
         Extract salient keywords from text.
 
@@ -104,7 +104,7 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-    ) -> dict[str, list[dict[str, str]]]:
+    ) -> OutputModels.ToolOutput:
         """
         Perform Named Entity Recognition (NER) over the input text.
 
@@ -138,7 +138,7 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-    ) -> dict[str, bool]:
+    ) -> OutputModels.ToolOutput:
         """
         Detect if the input is phrased as a question.
 
@@ -173,7 +173,7 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-    ) -> dict[str, str]:
+    ) -> OutputModels.ToolOutput:
         """
         Generate a single question from the given text.
 
@@ -209,7 +209,7 @@ class AsyncTheTool:
         logprobs: bool = False,
         top_logprobs: int | None = None,
         mode: Literal["default", "reason"] = "default",
-    ) -> dict[str, str]:
+    ) -> OutputModels.ToolOutput:
         """
         Merge multiple questions into a single unified question.
 
@@ -246,7 +246,7 @@ class AsyncTheTool:
         logprobs: bool = False,
         top_logprobs: int | None = None,
         mode: Literal["positive", "negative", "hard_negative"] = "positive",
-    ) -> dict[str, str]:
+    ) -> OutputModels.ToolOutput:
         """
         Rewrite a text with different modes.
 
@@ -282,7 +282,7 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-    ) -> dict[str, list[str]]:
+    ) -> OutputModels.ToolOutput:
         """
         Generate a list of questions about a subject.
 
@@ -318,7 +318,7 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-    ) -> dict[str, str]:
+    ) -> OutputModels.ToolOutput:
         """
         Summarize the given subject text.
 
@@ -353,7 +353,7 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-    ) -> dict[str, str]:
+    ) -> OutputModels.ToolOutput:
         """
         Translate text between languages.
 
@@ -388,7 +388,7 @@ class AsyncTheTool:
         temperature: float | None = None,
         logprobs: bool | None = None,
         top_logprobs: int | None = None,
-    ) -> dict[str, Any]:
+    ) -> OutputModels.ToolOutput:
         """
         Custom tool that can do almost anything!
 
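Every method in this file now returns OutputModels.ToolOutput instead of a per-method dict type. A minimal caller-side sketch of the new contract follows; the AsyncTheTool constructor call and the method name summarize are assumptions (the diff shows only signature tails and docstrings), while the result/analysis/logprobs/errors fields match the ToolOutput construction visible in the operator hunks below.

import asyncio

from texttools.tools.async_the_tool import AsyncTheTool  # path per the file list above


async def main() -> None:
    tool = AsyncTheTool()  # hypothetical constructor call; real arguments are not shown in this diff

    # Hypothetical method name, inferred from the docstring
    # "Summarize the given subject text."
    output = await tool.summarize("Some long passage ...", temperature=0.0)

    # 1.1.1 returned plain dicts (e.g. dict[str, str]); 1.1.2 returns a single
    # ToolOutput model, so callers use attribute access instead of key lookups.
    if output.errors:
        print("tool call failed:", output.errors)
    else:
        print(output.result)


asyncio.run(main())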
--- hamtaa_texttools-1.1.1/texttools/tools/internals/async_operator.py
+++ hamtaa_texttools-1.1.2/texttools/tools/internals/async_operator.py
@@ -12,7 +12,6 @@ from texttools.tools.internals.prompt_loader import PromptLoader
 # Base Model type for output models
 T = TypeVar("T", bound=BaseModel)
 
-# Configure logger
 logger = logging.getLogger("async_operator")
 logger.setLevel(logging.INFO)
 
@@ -32,6 +31,10 @@ class AsyncOperator(BaseOperator):
         self.model = model
 
     async def _analyze(self, prompt_configs: dict[str, str], temperature: float) -> str:
+        """
+        Calls OpenAI API for analysis using the configured prompt template.
+        Returns the analyzed content as a string.
+        """
         analyze_prompt = prompt_configs["analyze_template"]
         analyze_message = [self._build_user_message(analyze_prompt)]
         completion = await self.client.chat.completions.create(
@@ -50,6 +53,10 @@ class AsyncOperator(BaseOperator):
         logprobs: bool = False,
         top_logprobs: int = 3,
     ) -> tuple[Type[T], Any]:
+        """
+        Parses a chat completion using OpenAI's structured output format.
+        Returns both the parsed object and the raw completion for logging.
+        """
         request_kwargs = {
             "model": self.model,
             "messages": message,
@@ -73,6 +80,10 @@ class AsyncOperator(BaseOperator):
         logprobs: bool = False,
         top_logprobs: int = 3,
     ) -> tuple[Type[T], Any]:
+        """
+        Generates a completion using vLLM with JSON schema guidance.
+        Returns the parsed output model and raw completion.
+        """
         json_schema = output_model.model_json_schema()
 
         # Build kwargs dynamically
@@ -110,14 +121,16 @@ class AsyncOperator(BaseOperator):
         resp_format: Literal["vllm", "parse"],
         mode: str | None,
         **extra_kwargs,
-    ) -> dict[str, Any]:
+    ) -> ToolOutput:
         """
         Execute the async LLM pipeline with the given input text. (Async)
         """
         prompt_loader = PromptLoader()
         formatter = Formatter()
+        output = ToolOutput(result="", analysis="", logprobs=[], errors=[])
 
         try:
+            # Prompt configs contain two keys: main_template and analyze template, both are string
             prompt_configs = prompt_loader.load(
                 prompt_file=prompt_file,
                 text=text.strip(),
@@ -159,11 +172,10 @@ class AsyncOperator(BaseOperator):
 
             # Ensure output_model has a `result` field
             if not hasattr(parsed, "result"):
-                logger.error(
-                    "The provided output_model must define a field named 'result'"
-                )
-
-                output = ToolOutput(result="", analysis="", logprobs=[], errors=[])
+                error = "The provided output_model must define a field named 'result'"
+                logger.error(error)
+                output.errors.append(error)
+                return output
 
             output.result = parsed.result
 
@@ -174,6 +186,7 @@ class AsyncOperator(BaseOperator):
             output.analysis = analysis
 
             return output
+
         except Exception as e:
             logger.error(f"AsyncTheTool failed: {e}")
-            return ToolOutput(result="", analysis="", logprobs=[], errors=[str(e)])
+            return output.errors.append(str(e))
--- hamtaa_texttools-1.1.1/texttools/tools/internals/base_operator.py
+++ hamtaa_texttools-1.1.2/texttools/tools/internals/base_operator.py
@@ -10,7 +10,6 @@ from openai import OpenAI, AsyncOpenAI
 # Base Model type for output models
 T = TypeVar("T", bound=BaseModel)
 
-# Configure logger
 logger = logging.getLogger("base_operator")
 logger.setLevel(logging.INFO)
 
@@ -40,13 +39,6 @@ class BaseOperator:
     ) -> Type[T]:
         """
         Convert a JSON response string to output model.
-
-        Args:
-            response_string: The JSON string (may contain code block markers)
-            output_model: Your Pydantic output model class (e.g., StrOutput, ListStrOutput)
-
-        Returns:
-            Instance of your output model
         """
         # Clean the response string
         cleaned_json = self._clean_json_response(response_string)
@@ -61,7 +53,12 @@ class BaseOperator:
         return output_model(**response_dict)
 
     def _extract_logprobs(self, completion: dict) -> list[dict[str, Any]]:
+        """
+        Extracts and filters token probabilities from completion logprobs.
+        Skips punctuation and structural tokens, returns cleaned probability data.
+        """
         logprobs_data = []
+
         ignore_pattern = re.compile(r'^(result|[\s\[\]\{\}",:]+)$')
 
         for choice in completion.choices:
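The new docstring on _extract_logprobs is backed by the ignore pattern shown in the hunk above, which is easy to sanity-check in isolation. A standalone sketch with the same regex (the token list is invented for illustration):

import re

# Same pattern as in base_operator.py: drops the literal token "result" and any
# token made up purely of whitespace or JSON structural punctuation.
ignore_pattern = re.compile(r'^(result|[\s\[\]\{\}",:]+)$')

# Invented tokens, roughly what a {"result": "Tehran"} completion tokenizes into.
tokens = ['{"', 'result', '":', ' "', 'Tehran', '"', '}']
kept = [t for t in tokens if not ignore_pattern.match(t)]
print(kept)  # ['Tehran'] - only the content-bearing token survives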
--- hamtaa_texttools-1.1.1/texttools/tools/internals/operator.py
+++ hamtaa_texttools-1.1.2/texttools/tools/internals/operator.py
@@ -12,7 +12,6 @@ from texttools.tools.internals.prompt_loader import PromptLoader
 # Base Model type for output models
 T = TypeVar("T", bound=BaseModel)
 
-# Configure logger
 logger = logging.getLogger("operator")
 logger.setLevel(logging.INFO)
 
@@ -32,6 +31,10 @@ class Operator(BaseOperator):
         self.model = model
 
     def _analyze(self, prompt_configs: dict[str, str], temperature: float) -> str:
+        """
+        Calls OpenAI API for analysis using the configured prompt template.
+        Returns the analyzed content as a string.
+        """
         analyze_prompt = prompt_configs["analyze_template"]
         analyze_message = [self._build_user_message(analyze_prompt)]
         completion = self.client.chat.completions.create(
@@ -50,6 +53,10 @@ class Operator(BaseOperator):
         logprobs: bool = False,
         top_logprobs: int = 3,
     ) -> tuple[Type[T], Any]:
+        """
+        Parses a chat completion using OpenAI's structured output format.
+        Returns both the parsed object and the raw completion for logging.
+        """
         request_kwargs = {
             "model": self.model,
             "messages": message,
@@ -73,6 +80,10 @@ class Operator(BaseOperator):
         logprobs: bool = False,
         top_logprobs: int = 3,
     ) -> tuple[Type[T], Any]:
+        """
+        Generates a completion using vLLM with JSON schema guidance.
+        Returns the parsed output model and raw completion.
+        """
         json_schema = output_model.model_json_schema()
 
         # Build kwargs dynamically
@@ -110,14 +121,16 @@ class Operator(BaseOperator):
         resp_format: Literal["vllm", "parse"],
         mode: str | None,
         **extra_kwargs,
-    ) -> dict[str, Any]:
+    ) -> ToolOutput:
         """
         Execute the LLM pipeline with the given input text.
         """
         prompt_loader = PromptLoader()
         formatter = Formatter()
+        output = ToolOutput(result="", analysis="", logprobs=[], errors=[])
 
         try:
+            # Prompt configs contain two keys: main_template and analyze template, both are string
             prompt_configs = prompt_loader.load(
                 prompt_file=prompt_file,
                 text=text.strip(),
@@ -159,11 +172,10 @@ class Operator(BaseOperator):
 
             # Ensure output_model has a `result` field
             if not hasattr(parsed, "result"):
-                logger.error(
-                    "The provided output_model must define a field named 'result'"
-                )
-
-                output = ToolOutput(result="", analysis="", logprobs=[], errors=[])
+                error = "The provided output_model must define a field named 'result'"
+                logger.error(error)
+                output.errors.append(error)
+                return output
 
             output.result = parsed.result
 
@@ -174,6 +186,7 @@ class Operator(BaseOperator):
             output.analysis = analysis
 
             return output
+
         except Exception as e:
-            logger.error(f"TheTool failed: {e}")
-            return ToolOutput(result="", analysis="", logprobs=[], errors=[str(e)])
+            logger.error(f"AsyncTheTool failed: {e}")
+            return output.errors.append(str(e))
--- hamtaa_texttools-1.1.1/texttools/tools/internals/prompt_loader.py
+++ hamtaa_texttools-1.1.2/texttools/tools/internals/prompt_loader.py
@@ -24,6 +24,9 @@ class PromptLoader:
     # Use lru_cache to load each file once
     @lru_cache(maxsize=32)
     def _load_templates(self, prompt_file: str, mode: str | None) -> dict[str, str]:
+        """
+        Loads prompt templates from YAML file with optional mode selection.
+        """
         base_dir = Path(__file__).parent.parent.parent / Path("prompts")
         prompt_path = base_dir / prompt_file
         data = yaml.safe_load(prompt_path.read_text(encoding="utf-8"))
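Since @lru_cache(maxsize=32) wraps a method, entries are keyed on the (self, prompt_file, mode) tuple, so each YAML file is parsed at most once per loader instance and mode. A toy sketch of the same caching shape as a free function (the prompts/ directory name comes from the file list above; everything else is illustrative):

from functools import lru_cache
from pathlib import Path

import yaml


@lru_cache(maxsize=32)
def load_templates(prompt_file: str, mode: str | None) -> dict:
    # One yaml.safe_load per distinct (prompt_file, mode) pair; repeat calls
    # hit the cache. `mode` joins the key even though selection is omitted here.
    path = Path("prompts") / prompt_file
    return yaml.safe_load(path.read_text(encoding="utf-8"))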
--- hamtaa_texttools-1.1.1/texttools/tools/the_tool.py
+++ hamtaa_texttools-1.1.2/texttools/tools/the_tool.py
@@ -32,7 +32,7 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-    ) -> dict[str, str]:
+    ) -> OutputModels.ToolOutput:
         """
         Categorize a text into a single Islamic studies domain category.
 
@@ -67,7 +67,7 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-    ) -> dict[str, list[str]]:
+    ) -> OutputModels.ToolOutput:
         """
         Extract salient keywords from text.
 
@@ -102,7 +102,7 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-    ) -> dict[str, list[dict[str, str]]]:
+    ) -> OutputModels.ToolOutput:
         """
         Perform Named Entity Recognition (NER) over the input text.
 
@@ -136,7 +136,7 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-    ) -> dict[str, bool]:
+    ) -> OutputModels.ToolOutput:
         """
         Detect if the input is phrased as a question.
 
@@ -171,7 +171,7 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-    ) -> dict[str, str]:
+    ) -> OutputModels.ToolOutput:
         """
         Generate a single question from the given text.
 
@@ -207,7 +207,7 @@ class TheTool:
         logprobs: bool = False,
         top_logprobs: int | None = None,
         mode: Literal["default", "reason"] = "default",
-    ) -> dict[str, str]:
+    ) -> OutputModels.ToolOutput:
         """
         Merge multiple questions into a single unified question.
 
@@ -244,7 +244,7 @@ class TheTool:
         logprobs: bool = False,
         top_logprobs: int | None = None,
         mode: Literal["positive", "negative", "hard_negative"] = "positive",
-    ) -> dict[str, str]:
+    ) -> OutputModels.ToolOutput:
         """
         Rewrite a text with different modes.
 
@@ -280,7 +280,7 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-    ) -> dict[str, list[str]]:
+    ) -> OutputModels.ToolOutput:
         """
         Generate a list of questions about a subject.
 
@@ -316,7 +316,7 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-    ) -> dict[str, str]:
+    ) -> OutputModels.ToolOutput:
         """
         Summarize the given subject text.
 
@@ -351,7 +351,7 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-    ) -> dict[str, str]:
+    ) -> OutputModels.ToolOutput:
         """
         Translate text between languages.
 
@@ -386,7 +386,7 @@ class TheTool:
         temperature: float | None = None,
         logprobs: bool | None = None,
         top_logprobs: int | None = None,
-    ) -> dict[str, Any]:
+    ) -> OutputModels.ToolOutput:
         """
         Custom tool that can do almost anything!
 
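Both operator hunks above now require that a caller-supplied output_model expose a result field, appending to output.errors and returning early instead of only logging. A sketch of a model that satisfies the guard (the class name and the extra field are invented for illustration):

from pydantic import BaseModel


class VerdictOutput(BaseModel):
    # The one field the operators check for via hasattr(parsed, "result").
    result: str
    # Extra fields are illustrative; the guard only requires `result`.
    confidence: float | None = None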