hamtaa-texttools 0.1.43__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of hamtaa-texttools might be problematic. Click here for more details.
- hamtaa_texttools-1.0.0.dist-info/METADATA +129 -0
- hamtaa_texttools-1.0.0.dist-info/RECORD +17 -0
- hamtaa_texttools-1.0.0.dist-info/licenses/LICENSE +21 -0
- {hamtaa_texttools-0.1.43.dist-info → hamtaa_texttools-1.0.0.dist-info}/top_level.txt +0 -0
- texttools/__init__.py +4 -21
- texttools/formatters/base_formatter.py +33 -0
- texttools/formatters/user_merge_formatter/user_merge_formatter.py +47 -0
- texttools/tools/__init__.py +2 -32
- texttools/tools/operator.py +236 -0
- texttools/tools/output_models.py +54 -0
- texttools/tools/prompt_loader.py +84 -0
- texttools/tools/the_tool.py +291 -0
- texttools/utils/__init__.py +4 -0
- texttools/{batch_manager → utils/batch_manager}/__init__.py +2 -0
- texttools/{batch_manager → utils/batch_manager}/batch_manager.py +11 -12
- texttools/{batch_manager → utils/batch_manager}/batch_runner.py +20 -15
- hamtaa_texttools-0.1.43.dist-info/METADATA +0 -60
- hamtaa_texttools-0.1.43.dist-info/RECORD +0 -60
- texttools/base/__init__.py +0 -3
- texttools/base/base_categorizer.py +0 -40
- texttools/base/base_keyword_extractor.py +0 -35
- texttools/base/base_ner_extractor.py +0 -61
- texttools/base/base_question_detector.py +0 -35
- texttools/base/base_question_generator.py +0 -99
- texttools/base/base_question_merger.py +0 -59
- texttools/base/base_question_rewriter.py +0 -61
- texttools/base/base_router.py +0 -33
- texttools/base/base_summarizer.py +0 -55
- texttools/base/base_task_performer.py +0 -53
- texttools/base/base_translator.py +0 -38
- texttools/formatter/__init__.py +0 -1
- texttools/formatter/base.py +0 -26
- texttools/formatter/gemma3_formatter.py +0 -51
- texttools/handlers/__init__.py +0 -6
- texttools/handlers/categorizer/__init__.py +0 -6
- texttools/handlers/categorizer/categorizer.py +0 -61
- texttools/handlers/handlers.py +0 -88
- texttools/tools/categorizer/__init__.py +0 -2
- texttools/tools/categorizer/encoder_model/__init__.py +0 -1
- texttools/tools/categorizer/encoder_model/encoder_vectorizer.py +0 -51
- texttools/tools/categorizer/llm/__init__.py +0 -2
- texttools/tools/categorizer/llm/gemma_categorizer.py +0 -169
- texttools/tools/categorizer/llm/openai_categorizer.py +0 -80
- texttools/tools/keyword_extractor/__init__.py +0 -1
- texttools/tools/keyword_extractor/gemma_extractor.py +0 -138
- texttools/tools/merger/__init__.py +0 -2
- texttools/tools/merger/gemma_question_merger.py +0 -214
- texttools/tools/ner/__init__.py +0 -1
- texttools/tools/ner/gemma_ner_extractor.py +0 -157
- texttools/tools/question_detector/__init__.py +0 -2
- texttools/tools/question_detector/gemma_detector.py +0 -130
- texttools/tools/question_detector/llm_detector.py +0 -112
- texttools/tools/question_generator/__init__.py +0 -1
- texttools/tools/question_generator/gemma_question_generator.py +0 -198
- texttools/tools/reranker/__init__.py +0 -3
- texttools/tools/reranker/reranker.py +0 -137
- texttools/tools/reranker/scorer.py +0 -216
- texttools/tools/reranker/sorter.py +0 -278
- texttools/tools/rewriter/__init__.py +0 -2
- texttools/tools/rewriter/gemma_question_rewriter.py +0 -213
- texttools/tools/router/__init__.py +0 -0
- texttools/tools/router/gemma_router.py +0 -169
- texttools/tools/subject_to_question/__init__.py +0 -1
- texttools/tools/subject_to_question/gemma_question_generator.py +0 -224
- texttools/tools/summarizer/__init__.py +0 -2
- texttools/tools/summarizer/gemma_summarizer.py +0 -140
- texttools/tools/summarizer/llm_summerizer.py +0 -108
- texttools/tools/translator/__init__.py +0 -1
- texttools/tools/translator/gemma_translator.py +0 -202
- {hamtaa_texttools-0.1.43.dist-info → hamtaa_texttools-1.0.0.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,129 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: hamtaa-texttools
|
|
3
|
+
Version: 1.0.0
|
|
4
|
+
Summary: TextTools is a high-level NLP toolkit built on top of modern LLMs.
|
|
5
|
+
Author-email: Tohidi <the.mohammad.tohidi@gmail.com>, Montazer <montazerh82@gmail.com>, Givechi <mohamad.m.givechi@gmail.com>, MoosaviNejad <erfanmoosavi84@gmail.com>
|
|
6
|
+
License: MIT License
|
|
7
|
+
|
|
8
|
+
Copyright (c) 2025 Hamtaa
|
|
9
|
+
|
|
10
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
11
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
12
|
+
in the Software without restriction, including without limitation the rights
|
|
13
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
14
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
15
|
+
furnished to do so, subject to the following conditions:
|
|
16
|
+
|
|
17
|
+
The above copyright notice and this permission notice shall be included in all
|
|
18
|
+
copies or substantial portions of the Software.
|
|
19
|
+
|
|
20
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
21
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
22
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
23
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
24
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
25
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
26
|
+
SOFTWARE.
|
|
27
|
+
Keywords: nlp,llm,text-processing,openai
|
|
28
|
+
Requires-Python: >=3.8
|
|
29
|
+
Description-Content-Type: text/markdown
|
|
30
|
+
License-File: LICENSE
|
|
31
|
+
Requires-Dist: openai==1.97.1
|
|
32
|
+
Requires-Dist: PyYAML>=6.0
|
|
33
|
+
Dynamic: license-file
|
|
34
|
+
|
|
35
|
+
# TextTools
|
|
36
|
+
|
|
37
|
+
## 📌 Overview
|
|
38
|
+
|
|
39
|
+
**TextTools** is a high-level **NLP toolkit** built on top of modern **LLMs**.
|
|
40
|
+
It provides ready-to-use utilities for **translation, question detection, keyword extraction, categorization, NER extraction, and more** — designed to help you integrate AI-powered text processing into your applications with minimal effort.
|
|
41
|
+
|
|
42
|
+
---
|
|
43
|
+
|
|
44
|
+
## ✨ Features
|
|
45
|
+
|
|
46
|
+
TextTools provides a rich collection of high-level NLP utilities built on top of LLMs.
|
|
47
|
+
Each tool is designed to work out-of-the-box with structured outputs (JSON / Pydantic).
|
|
48
|
+
|
|
49
|
+
- **Categorizer** → Zero-finetuning text categorization for fast, scalable classification.
|
|
50
|
+
- **Keyword Extractor** → Identify the most important keywords in a text.
|
|
51
|
+
- **Question Merger** → Merge the provided questions, preserving all the main points.
|
|
52
|
+
- **NER (Named Entity Recognition) Extractor** → Extract people, places, organizations, and other entities.
|
|
53
|
+
- **Question Detector** → Determine whether a text is a question or not.
|
|
54
|
+
- **Question Generator From Text** → Generate high-quality, context-relevant questions from provided text.
|
|
55
|
+
- **Question Generator From Subject** → Generate high-quality, context-relevant questions from a subject.
|
|
56
|
+
- **Rewriter** → Rewrite text while preserving meaning or without it.
|
|
57
|
+
- **Summarizer** → Condense long passages into clear, structured summaries.
|
|
58
|
+
- **Translator** → Translate text across multiple languages, with support for custom rules.
|
|
59
|
+
|
|
60
|
+
---
|
|
61
|
+
|
|
62
|
+
## 🔍 `with_analysis` Mode
|
|
63
|
+
|
|
64
|
+
The `with_analysis=True` flag enhances the tool's output by providing a detailed reasoning chain behind its result. This is valuable for debugging, improving prompts, or understanding model behavior.
|
|
65
|
+
|
|
66
|
+
**Please be aware:** This feature works by making an additional LLM API call for each tool invocation, which will **effectively double your token usage** for that operation.
|
|
67
|
+
|
|
68
|
+
---
|
|
69
|
+
|
|
70
|
+
## 🚀 Installation
|
|
71
|
+
|
|
72
|
+
Install the latest release via PyPI:
|
|
73
|
+
|
|
74
|
+
```bash
|
|
75
|
+
pip install -U hamtaa-texttools
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
---
|
|
79
|
+
|
|
80
|
+
## ⚡ Quick Start
|
|
81
|
+
|
|
82
|
+
```python
|
|
83
|
+
from openai import OpenAI
|
|
84
|
+
|
|
85
|
+
from texttools import TheTool
|
|
86
|
+
|
|
87
|
+
# Create your OpenAI client
|
|
88
|
+
client = OpenAI(base_url = "your_url", api_key = "your_api_key")
|
|
89
|
+
|
|
90
|
+
# Specify the model
|
|
91
|
+
model = "gpt-4o-mini"
|
|
92
|
+
|
|
93
|
+
# Create an instance of TheTool
|
|
94
|
+
# ⚠️ Note: Enabling `with_analysis=True` provides deeper insights but incurs additional LLM calls and token usage.
|
|
95
|
+
the_tool = TheTool(client = client, model = model, with_analysis = True)
|
|
96
|
+
|
|
97
|
+
# Example: Question Detection
|
|
98
|
+
print(the_tool.detect_question("Is this project open source?")["result"])
|
|
99
|
+
# Output: True
|
|
100
|
+
|
|
101
|
+
# Example: Translation
|
|
102
|
+
print(the_tool.translate("سلام، حالت چطوره؟")["result"])
|
|
103
|
+
# Output: "Hi! How are you?"
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
---
|
|
107
|
+
|
|
108
|
+
## 📚 Use Cases
|
|
109
|
+
|
|
110
|
+
Use **TextTools** when you need to:
|
|
111
|
+
|
|
112
|
+
- 🔍 **Classify** large datasets quickly without model training
|
|
113
|
+
- 🌍 **Translate** and process multilingual corpora with ease
|
|
114
|
+
- 🧩 **Integrate** LLMs into production pipelines (structured outputs)
|
|
115
|
+
- 📊 **Analyze** large text collections using embeddings and categorization
|
|
116
|
+
- ⚙️ **Automate** common text-processing tasks without reinventing the wheel
|
|
117
|
+
|
|
118
|
+
---
|
|
119
|
+
|
|
120
|
+
## 🤝 Contributing
|
|
121
|
+
|
|
122
|
+
Contributions are welcome!
|
|
123
|
+
Feel free to **open issues, suggest new features, or submit pull requests**.
|
|
124
|
+
|
|
125
|
+
---
|
|
126
|
+
|
|
127
|
+
## License
|
|
128
|
+
|
|
129
|
+
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
hamtaa_texttools-1.0.0.dist-info/licenses/LICENSE,sha256=TJch8KUnfKaKJFkaRqgtghB7rtprhaHyGirYKr90U4o,1062
|
|
2
|
+
texttools/__init__.py,sha256=DEPDeR8rKRye57x9kq00Adq9GOLFkmaWRq9sGBNQZ_c,241
|
|
3
|
+
texttools/formatters/base_formatter.py,sha256=sUrISJcczTLDPMiMETG-kyfZ64u0NubFpT3mjEQBskk,1147
|
|
4
|
+
texttools/formatters/user_merge_formatter/user_merge_formatter.py,sha256=R-e64Gwq6jARcpsnPYsgNIX7eqFDi0BtfiZOATvwxqo,1692
|
|
5
|
+
texttools/tools/__init__.py,sha256=Gzqlobmbgd5wOvy27JYPKB74MFtqDgFy6LwlRygN240,53
|
|
6
|
+
texttools/tools/operator.py,sha256=g3ZC5OSxG_oZQkkMbfzc8uUvw0FNvehNB5jPPY26KEg,7972
|
|
7
|
+
texttools/tools/output_models.py,sha256=EdMGvPEp0k8l9Ps48Arw7GMcXSmdRLPrvAhaYnVqGj8,1099
|
|
8
|
+
texttools/tools/prompt_loader.py,sha256=zrCgLNGkFV60u6b7CN4dNcml4cGLrC2ei0WcMfD28Bc,2817
|
|
9
|
+
texttools/tools/the_tool.py,sha256=lEMVpqhJvPqVzSWx8NlmYV7jqZ1ul3IqJ9nHJLjz0bw,9653
|
|
10
|
+
texttools/utils/__init__.py,sha256=XL_cVGbe8wKf8HQh_Q1JEZgGOlmpLijPoHNvzi1aYnc,167
|
|
11
|
+
texttools/utils/batch_manager/__init__.py,sha256=WcnujCd_5XotN6emVCfDaO_lMpyk8EwJYcFgNRks5q0,139
|
|
12
|
+
texttools/utils/batch_manager/batch_manager.py,sha256=N7dg1bE0QpGYjHtM0E9DWtXErZR_z0byls9d8RQdUbs,9104
|
|
13
|
+
texttools/utils/batch_manager/batch_runner.py,sha256=3dhzmHrvCKqQVTtxeBIiUhCyRwKiQp_WmWqGX2WTG-o,7602
|
|
14
|
+
hamtaa_texttools-1.0.0.dist-info/METADATA,sha256=kGjkvffaWgdZZletnEyj6eH-PCgHADXyMSW8YF1dclA,5114
|
|
15
|
+
hamtaa_texttools-1.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
16
|
+
hamtaa_texttools-1.0.0.dist-info/top_level.txt,sha256=5Mh0jIxxZ5rOXHGJ6Mp-JPKviywwN0MYuH0xk5bEWqE,10
|
|
17
|
+
hamtaa_texttools-1.0.0.dist-info/RECORD,,
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 Hamtaa
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
File without changes
|
texttools/__init__.py
CHANGED
|
@@ -1,26 +1,9 @@
|
|
|
1
|
-
from
|
|
2
|
-
from
|
|
3
|
-
|
|
4
|
-
PrintResultHandler,
|
|
5
|
-
ResultHandler,
|
|
6
|
-
SaveToFileResultHandler,
|
|
7
|
-
)
|
|
8
|
-
from texttools.tools.categorizer.encoder_model.encoder_vectorizer import (
|
|
9
|
-
EmbeddingCategorizer,
|
|
10
|
-
)
|
|
11
|
-
from texttools.tools.categorizer.llm.openai_categorizer import LLMCategorizer
|
|
12
|
-
from texttools.tools.question_detector.llm_detector import LLMQuestionDetector
|
|
13
|
-
from texttools.tools.summarizer import LLMSummarizer
|
|
1
|
+
from .tools.the_tool import TheTool
|
|
2
|
+
from .utils.batch_manager.batch_manager import SimpleBatchManager
|
|
3
|
+
from .utils.batch_manager.batch_runner import BatchJobRunner
|
|
14
4
|
|
|
15
5
|
__all__ = [
|
|
16
|
-
"
|
|
17
|
-
"NoOpResultHandler",
|
|
18
|
-
"PrintResultHandler",
|
|
19
|
-
"ResultHandler",
|
|
20
|
-
"SaveToFileResultHandler",
|
|
21
|
-
"EmbeddingCategorizer",
|
|
22
|
-
"LLMCategorizer",
|
|
6
|
+
"TheTool",
|
|
23
7
|
"SimpleBatchManager",
|
|
24
8
|
"BatchJobRunner",
|
|
25
|
-
"LLMSummarizer",
|
|
26
9
|
]
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
from typing import Any
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class BaseFormatter(ABC):
    """
    Adapter that converts a standardized conversation into the input format
    required by a specific LLM API.

    Subclasses take standardized messages (commonly a list[dict]) and emit
    the exact payload a given provider expects — an OpenAI-style message
    list, a single prompt string, or any other provider-specific structure.
    """

    @abstractmethod
    def format(self, messages: Any) -> Any:
        """
        Transform *messages* into a provider-specific payload.

        Args:
            messages: The input conversation. Often a list of dicts with
                'role' and 'content' keys, but the exact type and structure
                are implementation-defined.

        Returns:
            Whatever the target LLM API expects, for example:
              - a list of role/content dictionaries (e.g., OpenAI chat),
              - a single formatted string (completion-style APIs),
              - a dictionary carrying additional parameters,
              - any other provider-specific data structure.
        """
        ...
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
from texttools.formatters.base_formatter import BaseFormatter
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class UserMergeFormatter(BaseFormatter):
    """
    Collapses runs of consecutive user messages into one message whose
    contents are joined by newlines.

    Useful for condensing multi-turn user input into a single coherent
    message for the LLM. Assistant and system messages pass through
    untouched and naturally delimit the user-message runs.

    Raises:
        ValueError: If a message has unexpected keys or an unknown role.
    """

    def _validate_input(self, messages: list[dict[str, str]]):
        # Every message must carry exactly these two keys.
        expected_keys = {"role", "content"}
        known_roles = {"user", "assistant"}

        for msg in messages:
            keys = set(msg.keys())
            if keys != expected_keys:
                raise ValueError(
                    f"Message dict keys must be exactly {expected_keys}, got {keys}"
                )
            current_role = msg["role"]
            # "system" is accepted alongside the known conversational roles.
            if current_role != "system" and current_role not in known_roles:
                raise ValueError(f"Unexpected role: {current_role}")

    def format(self, messages: list[dict[str, str]]) -> list[dict[str, str]]:
        self._validate_input(messages)

        condensed: list[dict[str, str]] = []

        for msg in messages:
            current_role = msg["role"]
            text = msg["content"].strip()

            if condensed and current_role == "user" == condensed[-1]["role"]:
                # Same speaker as the previous turn: fold into it.
                condensed[-1]["content"] = condensed[-1]["content"] + "\n" + text
            else:
                # New speaker (or first message): open a fresh turn.
                condensed.append({"role": current_role, "content": text})

        return condensed
|
texttools/tools/__init__.py
CHANGED
|
@@ -1,33 +1,3 @@
|
|
|
1
|
-
from .
|
|
2
|
-
from .keyword_extractor import GemmaKeywordExtractor
|
|
3
|
-
from .ner import GemmaNERExtractor
|
|
4
|
-
from .question_detector import GemmaQuestionDetector, LLMQuestionDetector
|
|
5
|
-
from .question_generator import GemmaQuestionGenerator
|
|
6
|
-
from .reranker import GemmaReranker, GemmaScorer, GemmaSorter
|
|
7
|
-
from .rewriter import GemmaQuestionRewriter, RewriteMode
|
|
8
|
-
from .merger import GemmaQuestionMerger, MergingMode
|
|
9
|
-
from .subject_to_question import GemmaQuestionGeneratorFromSubject
|
|
10
|
-
from .summarizer import GemmaSummarizer, LLMSummarizer
|
|
11
|
-
from .translator import GemmaTranslator
|
|
1
|
+
from .the_tool import TheTool
|
|
12
2
|
|
|
13
|
-
__all__ = [
|
|
14
|
-
"EmbeddingCategorizer",
|
|
15
|
-
"GemmaCategorizer",
|
|
16
|
-
"LLMCategorizer",
|
|
17
|
-
"GemmaTranslator",
|
|
18
|
-
"GemmaSummarizer",
|
|
19
|
-
"LLMSummarizer",
|
|
20
|
-
"GemmaNERExtractor",
|
|
21
|
-
"GemmaQuestionDetector",
|
|
22
|
-
"LLMQuestionDetector",
|
|
23
|
-
"GemmaQuestionGenerator",
|
|
24
|
-
"GemmaScorer",
|
|
25
|
-
"GemmaSorter",
|
|
26
|
-
"GemmaReranker",
|
|
27
|
-
"GemmaQuestionRewriter",
|
|
28
|
-
"RewriteMode",
|
|
29
|
-
"GemmaKeywordExtractor",
|
|
30
|
-
"GemmaQuestionGeneratorFromSubject",
|
|
31
|
-
"GemmaQuestionMerger",
|
|
32
|
-
"MergingMode",
|
|
33
|
-
]
|
|
3
|
+
__all__ = ["TheTool"]
|
|
@@ -0,0 +1,236 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any, TypeVar, Type, Literal
|
|
4
|
+
import json
|
|
5
|
+
|
|
6
|
+
from openai import OpenAI
|
|
7
|
+
from pydantic import BaseModel
|
|
8
|
+
|
|
9
|
+
from texttools.formatters.user_merge_formatter.user_merge_formatter import (
|
|
10
|
+
UserMergeFormatter,
|
|
11
|
+
)
|
|
12
|
+
from texttools.tools.prompt_loader import PromptLoader
|
|
13
|
+
|
|
14
|
+
# Base Model type for output models
|
|
15
|
+
T = TypeVar("T", bound=BaseModel)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class Operator:
    """
    Core engine for running text-processing operations with an LLM.

    It wires together:
    - `PromptLoader` → loads YAML prompt templates.
    - `UserMergeFormatter` → applies formatting to messages (e.g., merging).
    - OpenAI client → executes completions/parsed completions.

    Workflow inside `run()`:
    1. Load prompt templates (`main_template` [+ `analyze_template` if enabled]).
    2. Optionally generate an "analysis" step via `_analyze()`.
    3. Build messages for the LLM.
    4. Obtain a structured response — either vLLM guided JSON or
       `.beta.chat.completions.parse()` — validated against the configured
       `OUTPUT_MODEL` (a Pydantic schema).
    5. Return results as a dict (always `{"result": ...}`, plus `analysis`
       if analysis was enabled).

    Attributes configured dynamically by `TheTool`:
    - PROMPT_FILE: str → YAML filename
    - OUTPUT_MODEL: Pydantic model class
    - WITH_ANALYSIS: bool → whether to run an analysis phase first
    - USE_MODES: bool → whether to select prompts by mode
    - MODE: str → which mode to use if modes are enabled
    - RESP_FORMAT: str → "vllm" or "parse"
    """

    PROMPT_FILE: str
    OUTPUT_MODEL: Type[T]
    WITH_ANALYSIS: bool = False
    USE_MODES: bool
    MODE: str = ""
    RESP_FORMAT: Literal["vllm", "parse"] = "vllm"

    def __init__(
        self,
        client: OpenAI,
        *,
        model: str,
        temperature: float = 0.0,
        **client_kwargs: Any,
    ):
        self.client: OpenAI = client
        self.model = model
        self.prompt_loader = PromptLoader()
        self.formatter = UserMergeFormatter()
        self.temperature = temperature
        # Forwarded verbatim to every chat.completions call.
        self.client_kwargs = client_kwargs

    def _build_user_message(self, prompt: str) -> dict[str, str]:
        """Wrap a prompt string in a user-role chat message."""
        return {"role": "user", "content": prompt}

    def _apply_formatter(self, messages: list[dict[str, str]]) -> list[dict[str, str]]:
        """Run the configured formatter over the message list."""
        return self.formatter.format(messages)

    def _analysis_completion(self, analyze_message: list[dict[str, str]]) -> str:
        """Call the LLM with the analysis prompt and return its stripped text."""
        try:
            completion = self.client.chat.completions.create(
                model=self.model,
                messages=analyze_message,
                temperature=self.temperature,
                **self.client_kwargs,
            )
            return completion.choices[0].message.content.strip()

        except Exception as e:
            print(f"[ERROR] Analysis failed: {e}")
            raise

    def _analyze(self) -> str:
        """Build the analysis message from the loaded prompts and run it."""
        analyze_prompt = self.prompt_configs["analyze_template"]
        analyze_message = [self._build_user_message(analyze_prompt)]
        return self._analysis_completion(analyze_message)

    def _build_main_message(self) -> dict[str, str]:
        """Build the main user message from the loaded prompt templates."""
        # FIX: return type is a single message dict; the previous annotation
        # (list[dict[str, str]]) was incorrect and misleading to callers.
        main_prompt = self.prompt_configs["main_template"]
        return self._build_user_message(main_prompt)

    def _parse_completion(self, message: list[dict[str, str]]) -> T:
        """Request a completion parsed directly into OUTPUT_MODEL."""
        try:
            completion = self.client.beta.chat.completions.parse(
                model=self.model,
                messages=message,
                response_format=self.OUTPUT_MODEL,
                temperature=self.temperature,
                **self.client_kwargs,
            )
            return completion.choices[0].message.parsed

        except Exception as e:
            print(f"[ERROR] Failed to parse completion: {e}")
            raise

    def _clean_json_response(self, response: str) -> str:
        """
        Clean JSON response by removing code block markers and whitespace.

        Handles cases like:
        - ```json{"result": "value"}```
        - ```{"result": "value"}```
        """
        cleaned = response.strip()

        # Strip an opening fence, with or without the "json" language tag.
        if cleaned.startswith("```json"):
            cleaned = cleaned[7:]
        elif cleaned.startswith("```"):
            cleaned = cleaned[3:]

        # Strip a closing fence.
        if cleaned.endswith("```"):
            cleaned = cleaned[:-3]

        return cleaned.strip()

    def _convert_to_output_model(self, response_string: str) -> T:
        """
        Convert a JSON response string into an OUTPUT_MODEL instance.

        Args:
            response_string: The JSON string (may contain code block markers).

        Returns:
            Instance of the configured OUTPUT_MODEL.

        Raises:
            ValueError: If the string is not valid JSON or does not match
                the OUTPUT_MODEL schema.
        """
        try:
            cleaned_json = self._clean_json_response(response_string)
            response_dict = json.loads(cleaned_json)
            return self.OUTPUT_MODEL(**response_dict)

        except json.JSONDecodeError as e:
            raise ValueError(
                f"Failed to parse JSON response: {e}\nResponse: {response_string}"
            ) from e
        except Exception as e:
            raise ValueError(f"Failed to convert to output model: {e}") from e

    def _vllm_completion(self, message: list[dict[str, str]]) -> T:
        """Request guided-JSON output (vLLM) and validate it into OUTPUT_MODEL."""
        try:
            json_schema = self.OUTPUT_MODEL.model_json_schema()
            completion = self.client.chat.completions.create(
                model=self.model,
                messages=message,
                # vLLM-specific structured-output extension.
                extra_body={"guided_json": json_schema},
                temperature=self.temperature,
                **self.client_kwargs,
            )
            response = completion.choices[0].message.content
            return self._convert_to_output_model(response)

        except Exception as e:
            print(f"[ERROR] Failed to get vLLM structured output: {e}")
            raise

    def run(self, input_text: str, **extra_kwargs) -> dict[str, Any]:
        """
        Execute the LLM pipeline with the given input text.

        Args:
            input_text: The text to process (will be stripped of whitespace)
            **extra_kwargs: Additional variables to inject into prompt templates

        Returns:
            Dictionary containing the parsed result and optional analysis

        Raises:
            ValueError: If RESP_FORMAT is not one of "vllm" or "parse".
            Exception: Any error from the underlying LLM calls is logged
                and re-raised to the caller.
        """
        try:
            cleaned_text = input_text.strip()

            self.prompt_configs = self.prompt_loader.load_prompts(
                self.PROMPT_FILE,
                self.USE_MODES,
                self.MODE,
                cleaned_text,
                **extra_kwargs,
            )

            messages: list[dict[str, str]] = []

            if self.WITH_ANALYSIS:
                analysis = self._analyze()
                messages.append(
                    self._build_user_message(f"Based on this analysis: {analysis}")
                )

            messages.append(self._build_main_message())
            messages = self.formatter.format(messages)

            if self.RESP_FORMAT == "vllm":
                parsed = self._vllm_completion(messages)
            elif self.RESP_FORMAT == "parse":
                parsed = self._parse_completion(messages)
            else:
                # FIX: previously an unrecognized RESP_FORMAT left `parsed`
                # unbound and surfaced as a confusing NameError.
                raise ValueError(f"Unsupported RESP_FORMAT: {self.RESP_FORMAT}")

            results = {"result": parsed.result}

            if self.WITH_ANALYSIS:
                results["analysis"] = analysis

            return results

        except Exception as e:
            # FIX: previously called exit(1) here, which killed the host
            # process and hid the error from callers; a library should log
            # and propagate instead.
            print(f"[ERROR] Operation failed: {e}")
            raise
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
from typing import Literal
|
|
2
|
+
|
|
3
|
+
from pydantic import BaseModel
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class StrOutput(BaseModel):
    """
    Output model for a single string result.
    """

    # The single string value returned by the operation.
    result: str
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class ListStrOutput(BaseModel):
    """
    Output model for a list of strings result.
    """

    # The list of string values returned by the operation.
    result: list[str]
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class ListDictStrStrOutput(BaseModel):
    """
    Output model for a list of dictionaries with string key-value pairs.
    """

    # Each entry is a flat str→str mapping; key names are operation-specific.
    result: list[dict[str, str]]
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class ReasonListStrOutput(BaseModel):
    """
    Output model containing a reasoning string followed by a list of strings.
    """

    # Free-text reasoning that accompanies the result.
    reason: str
    # The list of string values returned by the operation.
    result: list[str]
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class CategorizerOutput(BaseModel):
    """
    Output model for categorization with reasoning and a predefined category result.
    """

    # Free-text reasoning behind the chosen category.
    reason: str
    # Chosen category; the allowed values are Persian labels (religious-studies
    # topics), where the last entry means "none of these".
    result: Literal[
        "باورهای دینی",
        "اخلاق اسلامی",
        "احکام و فقه",
        "تاریخ اسلام و شخصیت ها",
        "منابع دینی",
        "دین و جامعه/سیاست",
        "عرفان و معنویت",
        "هیچکدام",
    ]
|