hamtaa-texttools 1.1.3__tar.gz → 1.1.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. {hamtaa_texttools-1.1.3/hamtaa_texttools.egg-info → hamtaa_texttools-1.1.4}/PKG-INFO +1 -1
  2. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4/hamtaa_texttools.egg-info}/PKG-INFO +1 -1
  3. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/pyproject.toml +1 -1
  4. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/tools/async_the_tool.py +11 -11
  5. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/tools/internals/async_operator.py +2 -2
  6. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/tools/internals/operator.py +2 -2
  7. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/tools/the_tool.py +11 -11
  8. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/LICENSE +0 -0
  9. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/MANIFEST.in +0 -0
  10. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/README.md +0 -0
  11. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/hamtaa_texttools.egg-info/SOURCES.txt +0 -0
  12. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/hamtaa_texttools.egg-info/dependency_links.txt +0 -0
  13. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/hamtaa_texttools.egg-info/requires.txt +0 -0
  14. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/hamtaa_texttools.egg-info/top_level.txt +0 -0
  15. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/setup.cfg +0 -0
  16. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/__init__.py +0 -0
  17. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/batch/__init__.py +0 -0
  18. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/batch/batch_manager.py +0 -0
  19. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/batch/batch_runner.py +0 -0
  20. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/prompts/README.md +0 -0
  21. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/prompts/categorizer.yaml +0 -0
  22. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/prompts/extract_entities.yaml +0 -0
  23. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/prompts/extract_keywords.yaml +0 -0
  24. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/prompts/is_question.yaml +0 -0
  25. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/prompts/merge_questions.yaml +0 -0
  26. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/prompts/rewrite.yaml +0 -0
  27. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/prompts/run_custom.yaml +0 -0
  28. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/prompts/subject_to_question.yaml +0 -0
  29. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/prompts/summarize.yaml +0 -0
  30. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/prompts/text_to_question.yaml +0 -0
  31. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/prompts/translate.yaml +0 -0
  32. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/tools/__init__.py +0 -0
  33. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/tools/internals/base_operator.py +0 -0
  34. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/tools/internals/formatters.py +0 -0
  35. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/tools/internals/output_models.py +0 -0
  36. {hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/tools/internals/prompt_loader.py +0 -0
{hamtaa_texttools-1.1.3/hamtaa_texttools.egg-info → hamtaa_texttools-1.1.4}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hamtaa-texttools
-Version: 1.1.3
+Version: 1.1.4
 Summary: A high-level NLP toolkit built on top of modern LLMs.
 Author-email: Tohidi <the.mohammad.tohidi@gmail.com>, Montazer <montazerh82@gmail.com>, Givechi <mohamad.m.givechi@gmail.com>, MoosaviNejad <erfanmoosavi84@gmail.com>
 License: MIT License
{hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4/hamtaa_texttools.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hamtaa-texttools
-Version: 1.1.3
+Version: 1.1.4
 Summary: A high-level NLP toolkit built on top of modern LLMs.
 Author-email: Tohidi <the.mohammad.tohidi@gmail.com>, Montazer <montazerh82@gmail.com>, Givechi <mohamad.m.givechi@gmail.com>, MoosaviNejad <erfanmoosavi84@gmail.com>
 License: MIT License
{hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "hamtaa-texttools"
-version = "1.1.3"
+version = "1.1.4"
 authors = [
     { name = "Tohidi", email = "the.mohammad.tohidi@gmail.com" },
     { name = "Montazer", email = "montazerh82@gmail.com" },
{hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/tools/async_the_tool.py
@@ -1,4 +1,4 @@
-from typing import Literal, Any
+from typing import Literal, Any, Callable
 
 from openai import AsyncOpenAI
 
@@ -34,7 +34,7 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Categorize a text into a single Islamic studies domain category.
@@ -71,7 +71,7 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Extract salient keywords from text.
@@ -108,7 +108,7 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Perform Named Entity Recognition (NER) over the input text.
@@ -144,7 +144,7 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Detect if the input is phrased as a question.
@@ -181,7 +181,7 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Generate a single question from the given text.
@@ -219,7 +219,7 @@ class AsyncTheTool:
         logprobs: bool = False,
         top_logprobs: int | None = None,
         mode: Literal["default", "reason"] = "default",
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Merge multiple questions into a single unified question.
@@ -258,7 +258,7 @@ class AsyncTheTool:
         logprobs: bool = False,
         top_logprobs: int | None = None,
         mode: Literal["positive", "negative", "hard_negative"] = "positive",
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Rewrite a text with different modes.
@@ -296,7 +296,7 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Generate a list of questions about a subject.
@@ -334,7 +334,7 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Summarize the given subject text.
@@ -371,7 +371,7 @@ class AsyncTheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Translate text between languages.
{hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/tools/internals/async_operator.py
@@ -1,4 +1,4 @@
-from typing import Any, TypeVar, Type, Literal
+from typing import Any, TypeVar, Type, Literal, Callable
 import logging
 
 from openai import AsyncOpenAI
@@ -115,7 +115,7 @@ class AsyncOperator(BaseOperator):
         temperature: float,
         logprobs: bool,
         top_logprobs: int | None,
-        validator: Any | None,
+        validator: Callable[[Any], bool] | None,
         # Internal parameters
         prompt_file: str,
         output_model: Type[T],
{hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/tools/internals/operator.py
@@ -1,4 +1,4 @@
-from typing import Any, TypeVar, Type, Literal
+from typing import Any, TypeVar, Type, Literal, Callable
 import logging
 
 from openai import OpenAI
@@ -115,7 +115,7 @@ class Operator(BaseOperator):
         temperature: float,
         logprobs: bool,
         top_logprobs: int | None,
-        validator: Any | None,
+        validator: Callable[[Any], bool] | None,
         # Internal parameters
         prompt_file: str,
         output_model: Type[T],
{hamtaa_texttools-1.1.3 → hamtaa_texttools-1.1.4}/texttools/tools/the_tool.py
@@ -1,4 +1,4 @@
-from typing import Literal, Any
+from typing import Literal, Any, Callable
 
 from openai import OpenAI
 
@@ -32,7 +32,7 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Categorize a text into a single Islamic studies domain category.
@@ -69,7 +69,7 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Extract salient keywords from text.
@@ -106,7 +106,7 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Perform Named Entity Recognition (NER) over the input text.
@@ -142,7 +142,7 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Detect if the input is phrased as a question.
@@ -179,7 +179,7 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Generate a single question from the given text.
@@ -217,7 +217,7 @@ class TheTool:
         logprobs: bool = False,
         top_logprobs: int | None = None,
         mode: Literal["default", "reason"] = "default",
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Merge multiple questions into a single unified question.
@@ -256,7 +256,7 @@ class TheTool:
         logprobs: bool = False,
         top_logprobs: int | None = None,
         mode: Literal["positive", "negative", "hard_negative"] = "positive",
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Rewrite a text with different modes.
@@ -294,7 +294,7 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Generate a list of questions about a subject.
@@ -332,7 +332,7 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Summarize the given subject text.
@@ -369,7 +369,7 @@ class TheTool:
         temperature: float | None = 0.0,
         logprobs: bool = False,
         top_logprobs: int | None = None,
-        validator: Any | None = None,
+        validator: Callable[[Any], bool] | None = None,
     ) -> OutputModels.ToolOutput:
         """
         Translate text between languages.
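
Aside from the version bump, the substantive change in 1.1.4 is the type of the validator parameter, which tightens from Any | None to Callable[[Any], bool] | None across AsyncTheTool, TheTool, AsyncOperator, and Operator. The following is a minimal sketch of a conforming validator; the commented-out call site uses an assumed constructor and method name, since the diff shows only parameter lists and docstrings.

from typing import Any, Callable


def non_empty(result: Any) -> bool:
    # Accept only results that are non-empty after stripping whitespace.
    return bool(str(result).strip())


# The new annotation documents the expected shape of the argument directly:
validator: Callable[[Any], bool] | None = non_empty

# Hypothetical usage (constructor and method names are assumptions, not
# confirmed by this diff):
# tool = TheTool(client=OpenAI())
# output = tool.summarize("...", validator=non_empty)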