hamtaa-texttools 1.0.5__py3-none-any.whl → 1.1.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. hamtaa_texttools-1.1.16.dist-info/METADATA +255 -0
  2. hamtaa_texttools-1.1.16.dist-info/RECORD +31 -0
  3. texttools/__init__.py +6 -8
  4. texttools/batch/batch_config.py +26 -0
  5. texttools/batch/batch_runner.py +144 -139
  6. texttools/batch/{batch_manager.py → internals/batch_manager.py} +42 -54
  7. texttools/batch/internals/utils.py +16 -0
  8. texttools/prompts/README.md +8 -4
  9. texttools/prompts/categorize.yaml +77 -0
  10. texttools/prompts/detect_entity.yaml +22 -0
  11. texttools/prompts/extract_keywords.yaml +68 -0
  12. texttools/prompts/{question_merger.yaml → merge_questions.yaml} +5 -5
  13. texttools/tools/async_tools.py +804 -0
  14. texttools/tools/internals/async_operator.py +139 -236
  15. texttools/tools/internals/formatters.py +24 -0
  16. texttools/tools/internals/models.py +183 -0
  17. texttools/tools/internals/operator_utils.py +54 -0
  18. texttools/tools/internals/prompt_loader.py +23 -43
  19. texttools/tools/internals/sync_operator.py +201 -0
  20. texttools/tools/sync_tools.py +804 -0
  21. hamtaa_texttools-1.0.5.dist-info/METADATA +0 -192
  22. hamtaa_texttools-1.0.5.dist-info/RECORD +0 -30
  23. texttools/batch/__init__.py +0 -4
  24. texttools/formatters/base_formatter.py +0 -33
  25. texttools/formatters/user_merge_formatter.py +0 -30
  26. texttools/prompts/categorizer.yaml +0 -28
  27. texttools/prompts/keyword_extractor.yaml +0 -18
  28. texttools/tools/__init__.py +0 -4
  29. texttools/tools/async_the_tool.py +0 -277
  30. texttools/tools/internals/operator.py +0 -295
  31. texttools/tools/internals/output_models.py +0 -52
  32. texttools/tools/the_tool.py +0 -501
  33. {hamtaa_texttools-1.0.5.dist-info → hamtaa_texttools-1.1.16.dist-info}/WHEEL +0 -0
  34. {hamtaa_texttools-1.0.5.dist-info → hamtaa_texttools-1.1.16.dist-info}/licenses/LICENSE +0 -0
  35. {hamtaa_texttools-1.0.5.dist-info → hamtaa_texttools-1.1.16.dist-info}/top_level.txt +0 -0
  36. /texttools/prompts/{ner_extractor.yaml → extract_entities.yaml} +0 -0
  37. /texttools/prompts/{question_detector.yaml → is_question.yaml} +0 -0
  38. /texttools/prompts/{rewriter.yaml → rewrite.yaml} +0 -0
  39. /texttools/prompts/{custom_tool.yaml → run_custom.yaml} +0 -0
  40. /texttools/prompts/{subject_question_generator.yaml → subject_to_question.yaml} +0 -0
  41. /texttools/prompts/{summarizer.yaml → summarize.yaml} +0 -0
  42. /texttools/prompts/{question_generator.yaml → text_to_question.yaml} +0 -0
  43. /texttools/prompts/{translator.yaml → translate.yaml} +0 -0
hamtaa_texttools-1.0.5.dist-info/METADATA
@@ -1,192 +0,0 @@
- Metadata-Version: 2.4
- Name: hamtaa-texttools
- Version: 1.0.5
- Summary: TextTools is a high-level NLP toolkit built on top of modern LLMs.
- Author-email: Tohidi <the.mohammad.tohidi@gmail.com>, Montazer <montazerh82@gmail.com>, Givechi <mohamad.m.givechi@gmail.com>, MoosaviNejad <erfanmoosavi84@gmail.com>
- License: MIT License
-
- Copyright (c) 2025 Hamtaa
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
- Keywords: nlp,llm,text-processing,openai
- Requires-Python: >=3.8
- Description-Content-Type: text/markdown
- License-File: LICENSE
- Requires-Dist: openai==1.97.1
- Requires-Dist: PyYAML>=6.0
- Dynamic: license-file
-
- # TextTools
-
- ## 📌 Overview
-
- **TextTools** is a high-level **NLP toolkit** built on top of modern **LLMs**.
-
- It provides both **sync (`TheTool`)** and **async (`AsyncTheTool`)** APIs for maximum flexibility.
-
- It provides ready-to-use utilities for **translation, question detection, keyword extraction, categorization, named-entity extraction, and more**, designed to help you integrate AI-powered text processing into your applications with minimal effort.
-
- **Thread Safety:** All methods in AsyncTheTool are thread-safe, allowing concurrent usage across multiple threads without conflicts.
-
- ---
-
- ## ✨ Features
-
- TextTools provides a rich collection of high-level NLP utilities built on top of LLMs.
- Each tool is designed to work out-of-the-box with structured outputs (JSON / Pydantic); the sketch after this list shows a few of them in action.
-
- - **Categorizer** → Zero-finetuning text categorization for fast, scalable classification.
- - **Keyword Extractor** → Identify the most important keywords in a text.
- - **Question Merger** → Merge the provided questions, preserving all of their main points.
- - **NER (Named Entity Recognition) Extractor** → Extract people, places, organizations, and other entities.
- - **Question Detector** → Determine whether a text is a question.
- - **Question Generator From Text** → Generate high-quality, context-relevant questions from provided text.
- - **Question Generator From Subject** → Generate high-quality, context-relevant questions from a subject.
- - **Rewriter** → Rewrite text, either preserving its meaning or deliberately altering it.
- - **Summarizer** → Condense long passages into clear, structured summaries.
- - **Translator** → Translate text across multiple languages, with support for custom rules.
- - **Custom Tool** → Define a custom tool with an arbitrary Pydantic BaseModel.
-
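For illustration, here is a minimal sketch of a few of these tools through the sync API. It assumes the sync `TheTool` mirrors the `AsyncTheTool` methods shown at the bottom of this diff (same method names, same `{"result": ...}` return shape); the base URL, key, and sample inputs are placeholders:

```python
from openai import OpenAI
from texttools import TheTool

client = OpenAI(base_url="your_url", api_key="your_api_key")  # placeholders
the_tool = TheTool(client=client, model="gpt-4o-mini")

# Summarizer: condense a passage into a short summary
print(the_tool.summarize("TextTools is a high-level NLP toolkit ...")["result"])

# Keyword Extractor: single-word keywords for the main concepts
print(the_tool.extract_keywords("TextTools is a high-level NLP toolkit ...")["result"])

# NER Extractor: entities as a list of dictionaries
print(the_tool.extract_entities("Hamtaa released TextTools in 2025.")["result"])
```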
- ---
-
- ## ⚙️ `with_analysis`, `logprobs`, `output_lang`, and `user_prompt` parameters
-
- TextTools provides several optional flags to customize LLM behavior:
-
- **`with_analysis=True`** → Adds a reasoning step before generating the final output. Useful for debugging, improving prompts, or understanding model behavior.
- Note: This doubles token usage per call because it triggers an additional LLM request.
-
- **`logprobs=True`** → Returns token-level probabilities for the generated output. You can also specify `top_logprobs=<N>` to get the top N alternative tokens and their probabilities.
-
- **`output_lang="en"`** → Forces the model to respond in a specific language. The model will ignore other instructions about language and respond strictly in the requested language.
-
- **`user_prompt="..."`** → Allows you to inject a custom instruction or prompt into the model alongside the main template. This gives you fine-grained control over how the model interprets or modifies the input text.
-
- All these flags can be used individually or together to tailor the behavior of any tool in **TextTools**; the sketch below combines several of them.
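A sketch of several flags combined on a single call, assuming a `TheTool` instance named `the_tool` as constructed in the Quick Start below (the `user_prompt` string is a made-up instruction):

```python
detection = the_tool.detect_question(
    "Is this project open source?",
    with_analysis=True,   # extra reasoning pass; roughly doubles token usage
    logprobs=True,        # include token-level probabilities
    top_logprobs=2,       # also return the top 2 alternatives per token
    output_lang="en",     # force the response language
    user_prompt="Treat rhetorical questions as questions.",  # hypothetical custom instruction
)
print(detection["result"], detection["logprobs"])
```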
-
- ---
-
- ## 🚀 Installation
-
- Install the latest release via PyPI:
-
- ```bash
- pip install -U hamtaa-texttools
- ```
-
- ---
-
- ## Sync vs Async
-
- | Tool           | Style | Use case                                     |
- |----------------|-------|----------------------------------------------|
- | `TheTool`      | Sync  | Simple scripts, sequential workflows         |
- | `AsyncTheTool` | Async | High-throughput apps, APIs, concurrent tasks |
-
- ---
-
- ## ⚡ Quick Start (Sync)
-
- ```python
- from openai import OpenAI
- from pydantic import BaseModel
- from texttools import TheTool
-
- # Create your OpenAI client
- client = OpenAI(base_url="your_url", api_key="your_api_key")
-
- # Specify the model
- model = "gpt-4o-mini"
-
- # Create an instance of TheTool
- # Note: You can pass parameters to TheTool once so that you don't need to pass them to each call
- the_tool = TheTool(client=client, model=model, with_analysis=True, output_lang="English")
-
- # Example: Question Detection
- detection = the_tool.detect_question("Is this project open source?", logprobs=True, top_logprobs=2)
- print(detection["result"])
- # Output: True
- print(detection["logprobs"])
-
- # Example: Translation
- # Note: You can override with_analysis per call, even if it was set on TheTool
- print(the_tool.translate("سلام، حالت چطوره؟", target_language="English", with_analysis=False)["result"])
- # Output: "Hi! How are you?"
-
- # Example: Custom Tool
- # Note: The output model should contain only a `result` key
- # Everything else will be ignored
- class Custom(BaseModel):
-     result: list[list[dict[str, int]]]
-
- custom_prompt = "Something"
- custom_result = the_tool.custom_tool(custom_prompt, Custom)
- print(custom_result)
- ```
-
- ---
-
- ## ⚡ Quick Start (Async)
-
- ```python
- import asyncio
- from openai import AsyncOpenAI
- from texttools import AsyncTheTool
-
- async def main():
-     # Create your async OpenAI client
-     async_client = AsyncOpenAI(base_url="your_url", api_key="your_api_key")
-
-     # Specify the model
-     model = "gpt-4o-mini"
-
-     # Create an instance of AsyncTheTool
-     the_tool = AsyncTheTool(client=async_client, model=model)
-
-     # Example: Async Translation
-     result = await the_tool.translate("سلام، حالت چطوره؟", target_language="English")
-     print(result["result"])
-     # Output: "Hi! How are you?"
-
- asyncio.run(main())
- ```
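Since `AsyncTheTool` targets high-throughput, concurrent workloads (see the table above), a natural pattern is fanning several calls out with `asyncio.gather`. A minimal sketch, reusing the placeholder setup from the Quick Start (the input texts are arbitrary examples):

```python
import asyncio
from openai import AsyncOpenAI
from texttools import AsyncTheTool

async def translate_many():
    async_client = AsyncOpenAI(base_url="your_url", api_key="your_api_key")
    the_tool = AsyncTheTool(client=async_client, model="gpt-4o-mini")

    texts = ["سلام، حالت چطوره؟", "خیلی ممنون"]
    # Run the translations concurrently instead of awaiting them one by one
    results = await asyncio.gather(
        *(the_tool.translate(t, target_language="English") for t in texts)
    )
    for r in results:
        print(r["result"])

asyncio.run(translate_many())
```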
-
- ---
-
- ## 📚 Use Cases
-
- Use **TextTools** when you need to:
-
- - 🔍 **Classify** large datasets quickly without model training
- - 🌍 **Translate** and process multilingual corpora with ease
- - 🧩 **Integrate** LLMs into production pipelines (structured outputs)
- - 📊 **Analyze** large text collections using embeddings and categorization
- - 👍 **Automate** common text-processing tasks without reinventing the wheel
-
- ---
-
- ## 🤝 Contributing
-
- Contributions are welcome!
- Feel free to **open issues, suggest new features, or submit pull requests**.
-
- ---
-
- ## License
-
- This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
hamtaa_texttools-1.0.5.dist-info/RECORD
@@ -1,30 +0,0 @@
- hamtaa_texttools-1.0.5.dist-info/licenses/LICENSE,sha256=Hb2YOBKy2MJQLnyLrX37B4ZVuac8eaIcE71SvVIMOLg,1082
- texttools/__init__.py,sha256=v3tQCH_Cjj47fCpuhK6sKSVAqEjNkc-cZbY4OJa4IZw,202
- texttools/batch/__init__.py,sha256=q50JsQsmQGp_8RW0KNasYeYWVV0R4FUNZ-ujXwEJemY,143
- texttools/batch/batch_manager.py,sha256=aYnHy82b4FJmhi2TWjXtxg67dN6PZOu2gQSusx373vE,9328
- texttools/batch/batch_runner.py,sha256=OHxeFT0YhEuDwZVAE07PUEC_7cuICWnnim3SuyZsx4U,7814
- texttools/formatters/base_formatter.py,sha256=xxnbujAr01NqZ49Y61LVFpIbj6kTEmV6JiUH_qCsIFk,1180
- texttools/formatters/user_merge_formatter.py,sha256=U_d7npTkC9QDgtEFjAjIuDlPfxVj3S1RyziidKqjciw,1086
- texttools/prompts/README.md,sha256=z8XW7ovh0yT1WGhkSPzYKixEtiGuDzeof6MALXaxUUY,1365
- texttools/prompts/categorizer.yaml,sha256=GMqIIzQFhgnlpkgU1qi3FAD3mD4A2jiWD5TilQ2XnnE,1204
- texttools/prompts/custom_tool.yaml,sha256=38OkCoVITbuuS9c08UZSP1jZW4WjSmRIi8fR0RAiPu4,108
- texttools/prompts/keyword_extractor.yaml,sha256=R05Ac_qnP4sUvhOGCW3XpjlJFdz1KgU4CgVCOXflY8M,775
- texttools/prompts/ner_extractor.yaml,sha256=KiKjeDpHaeh3JVtZ6q1pa3k4DYucUIU9WnEcRTCA-SE,651
- texttools/prompts/question_detector.yaml,sha256=d0-vKRbXWkxvO64ikvxRjEmpAXGpCYIPGhgexvPPjws,471
- texttools/prompts/question_generator.yaml,sha256=UheKYpDn6iyKI8NxunHZtFpNyfCLZZe5cvkuXpurUJY,783
- texttools/prompts/question_merger.yaml,sha256=b72QAk9Gs8k1xb2lSDXx44u-3Ku5vIuWL_6han4UaO0,1797
- texttools/prompts/rewriter.yaml,sha256=LO7He_IA3MZKz8a-LxH9DHJpOjpYwaYN1pbjp1Y0tFo,5392
- texttools/prompts/subject_question_generator.yaml,sha256=C7x7rNNm6U_ZG9HOn6zuzYOtvJUZ2skuWbL1-aYdd3E,1147
- texttools/prompts/summarizer.yaml,sha256=o6rxGPfWtZd61Duvm8NVvCJqfq73b-wAuMSKR6UYUqY,459
- texttools/prompts/translator.yaml,sha256=mGT2uBCei6uucWqVbs4silk-UV060v3G0jnt0P6sr50,634
- texttools/tools/__init__.py,sha256=hG1I28Q7BJ1Dbs95x6QMKXdsAlC5Eh_tqC-EbAibwiU,114
- texttools/tools/async_the_tool.py,sha256=m5b8t1eAGDDN44nOf-h9l-8rLxg7a859nZ9QePVlRzI,8827
- texttools/tools/the_tool.py,sha256=oRDsg8ZqMcUiWX6WXdtw9C1XkgWdi93GcBXCqTcUCMo,19406
- texttools/tools/internals/async_operator.py,sha256=jvpeVffRYnm6inUB7jncbmiOAztayMPzsIi7UH1IrHs,9910
- texttools/tools/internals/operator.py,sha256=4lrV8UisJoSUKGI23iL8IjK_MWg-ev43ywAXxCob3nU,10033
- texttools/tools/internals/output_models.py,sha256=Rf2x-UuGlmQHrvYIqnD11YuzMH_mPuir62HoMJQa2uk,1528
- texttools/tools/internals/prompt_loader.py,sha256=8RFhZE3HOcnQdtndSP2qULD6YQbZ34EVsTbXR-Zr-NM,2510
- hamtaa_texttools-1.0.5.dist-info/METADATA,sha256=l2qrSuF_s2JHwcybcGdq4c2hhcE-D6eqNzPvJZPyHaE,7780
- hamtaa_texttools-1.0.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- hamtaa_texttools-1.0.5.dist-info/top_level.txt,sha256=5Mh0jIxxZ5rOXHGJ6Mp-JPKviywwN0MYuH0xk5bEWqE,10
- hamtaa_texttools-1.0.5.dist-info/RECORD,,
texttools/batch/__init__.py
@@ -1,4 +0,0 @@
- from .batch_manager import SimpleBatchManager
- from .batch_runner import BatchJobRunner
-
- __all__ = ["SimpleBatchManager", "BatchJobRunner"]
texttools/formatters/base_formatter.py
@@ -1,33 +0,0 @@
- from abc import ABC, abstractmethod
- from typing import Any
-
-
- class BaseFormatter(ABC):
-     """
-     Adapter to convert a conversation into a specific LLM API's input format.
-
-     Concrete implementations transform standardized messages (e.g., list[dict]) into the
-     exact payload required by a provider (e.g., OpenAI's message list, a single string, etc.).
-     """
-
-     @abstractmethod
-     def format(
-         self,
-         messages: Any,
-     ) -> Any:
-         """
-         Transform the input messages into a provider-specific payload.
-
-         Args:
-             messages: The input conversation. While often a list of dicts with
-                 'role' and 'content' keys, the exact type and structure may vary
-                 by implementation.
-
-         Returns:
-             A payload in the format expected by the target LLM API. This could be:
-             - A list of role-content dictionaries (e.g., for OpenAI)
-             - A single formatted string (e.g., for completion-style APIs)
-             - A complex dictionary with additional parameters
-             - Any other provider-specific data structure
-         """
-         pass
texttools/formatters/user_merge_formatter.py
@@ -1,30 +0,0 @@
- from texttools.formatters.base_formatter import BaseFormatter
-
-
- class UserMergeFormatter(BaseFormatter):
-     """
-     Merges consecutive user messages into a single message, separated by newlines.
-
-     This is useful for condensing a multi-turn user input into a single coherent
-     message for the LLM. Assistant and system messages are left unchanged and
-     act as separators between user message groups.
-
-     Raises:
-         ValueError: If the input messages have invalid structure or roles.
-     """
-
-     def format(self, messages: list[dict[str, str]]) -> list[dict[str, str]]:
-         merged: list[dict[str, str]] = []
-
-         for message in messages:
-             role, content = message["role"], message["content"].strip()
-
-             # Merge with previous user turn
-             if merged and role == "user" and merged[-1]["role"] == "user":
-                 merged[-1]["content"] += "\n" + content
-
-             # Otherwise, start a new turn
-             else:
-                 merged.append({"role": role, "content": content})
-
-         return merged
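For reference, a short usage sketch of this formatter; the expected output in the comment follows directly from the merging logic above:

```python
from texttools.formatters.user_merge_formatter import UserMergeFormatter

formatter = UserMergeFormatter()
messages = [
    {"role": "user", "content": "Hello."},
    {"role": "user", "content": "What is TextTools?"},
    {"role": "assistant", "content": "An NLP toolkit."},
    {"role": "user", "content": "Thanks!"},
]
print(formatter.format(messages))
# [{'role': 'user', 'content': 'Hello.\nWhat is TextTools?'},
#  {'role': 'assistant', 'content': 'An NLP toolkit.'},
#  {'role': 'user', 'content': 'Thanks!'}]
```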
texttools/prompts/categorizer.yaml (Persian original; translated here to English)
@@ -1,28 +0,0 @@
- main_template: |
-   You are an expert in religious studies.
-   I will give you a text, and you must
-   classify that text into one of the categories below.
-   The categories:
-   "Religious beliefs",
-   "Islamic ethics",
-   "Rulings and jurisprudence",
-   "Islamic history and figures",
-   "Religious sources",
-   "Religion and society/politics",
-   "Mysticism and spirituality",
-   "None of these",
-   Respond only in this JSON format:
-   {{
-   "reason": "<briefly state the reason for your choice>",
-   "result": "<one of the categories>"
-   }}
-   The text you must classify:
-   {input}
-
- analyze_template: |
-   We want to classify the text that is provided.
-   To improve the classification, we need an analysis of the text.
-   Analyze the provided text and write its main idea and a short analysis.
-   The analysis must be very brief,
-   at most 20 words.
-   {input}
texttools/prompts/keyword_extractor.yaml
@@ -1,18 +0,0 @@
- main_template: |
-   You are an expert keyword extractor.
-   Extract the most relevant keywords from the given text.
-   Guidelines:
-   1. Keywords must represent the main concepts of the text.
-   2. If two words have overlapping meanings, choose only one.
-   3. Do not include generic or unrelated words.
-   4. Keywords must be single, self-contained words (no phrases).
-   5. Output between 3 and 7 keywords based on the input length.
-   6. Respond only in JSON format:
-   {{"result": ["keyword1", "keyword2", etc.]}}
-   Here is the text:
-   {input}
-
- analyze_template: |
-   Analyze the following text to identify its main topics, concepts, and important terms.
-   Provide a concise summary of your findings that will help in extracting relevant keywords.
-   {input}
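As a quick check of the JSON contract in guideline 6, a conforming model reply would parse like this (the keyword values are made up):

```python
import json

# Hypothetical model reply following the template's {"result": [...]} contract
reply = '{"result": ["toolkit", "keywords", "extraction"]}'
keywords = json.loads(reply)["result"]
assert 3 <= len(keywords) <= 7  # guideline 5
```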
texttools/tools/__init__.py
@@ -1,4 +0,0 @@
- from .async_the_tool import AsyncTheTool
- from .the_tool import TheTool
-
- __all__ = ["TheTool", "AsyncTheTool"]
texttools/tools/async_the_tool.py
@@ -1,277 +0,0 @@
- from typing import Literal
-
- from openai import AsyncOpenAI
-
- import texttools.tools.internals.output_models as OutputModels
- from texttools.tools.internals.async_operator import AsyncOperator
-
-
- class AsyncTheTool:
-     """
-     Async counterpart to TheTool.
-
-     Usage:
-         async_client = AsyncOpenAI(...)
-         tool = AsyncTheTool(async_client, model="gemma-3")
-         result = await tool.categorize("متن ...", with_analysis=True)
-     """
-
-     def __init__(
-         self,
-         client: AsyncOpenAI,
-         *,
-         model: str,
-         temperature: float = 0.0,
-     ):
-         self.operator = AsyncOperator(
-             client=client,
-             model=model,
-             temperature=temperature,
-         )
-
-     async def categorize(
-         self,
-         text: str,
-         with_analysis: bool = False,
-         user_prompt: str = "",
-         logprobs: bool = False,
-         top_logprobs: int = 8,
-         max_tokens: int | None = None,
-     ) -> dict[str, str]:
-         results = await self.operator.run(
-             text,
-             prompt_file="categorizer.yaml",
-             output_model=OutputModels.CategorizerOutput,
-             with_analysis=with_analysis,
-             resp_format="parse",
-             user_prompt=user_prompt,
-             logprobs=logprobs,
-             top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
-         )
-         return results
-
-     async def extract_keywords(
-         self,
-         text: str,
-         output_lang: str | None = None,
-         with_analysis: bool = False,
-         user_prompt: str = "",
-         logprobs: bool = False,
-         top_logprobs: int = 3,
-         max_tokens: int | None = None,
-     ) -> dict[str, list[str]]:
-         results = await self.operator.run(
-             text,
-             prompt_file="keyword_extractor.yaml",
-             output_model=OutputModels.ListStrOutput,
-             with_analysis=with_analysis,
-             resp_format="parse",
-             user_prompt=user_prompt,
-             output_lang=output_lang,
-             logprobs=logprobs,
-             top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
-         )
-         return results
-
-     async def extract_entities(
-         self,
-         text: str,
-         output_lang: str | None = None,
-         with_analysis: bool = False,
-         user_prompt: str = "",
-         logprobs: bool = False,
-         top_logprobs: int = 3,
-         max_tokens: int | None = None,
-     ) -> dict[str, list[dict[str, str]]]:
-         results = await self.operator.run(
-             text,
-             prompt_file="ner_extractor.yaml",
-             output_model=OutputModels.ListDictStrStrOutput,
-             with_analysis=with_analysis,
-             resp_format="parse",
-             user_prompt=user_prompt,
-             output_lang=output_lang,
-             logprobs=logprobs,
-             top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
-         )
-         return results
-
-     async def detect_question(
-         self,
-         question: str,
-         output_lang: str | None = None,
-         with_analysis: bool = False,
-         user_prompt: str = "",
-         logprobs: bool = False,
-         top_logprobs: int = 2,
-         max_tokens: int | None = None,
-     ) -> dict[str, bool]:
-         results = await self.operator.run(
-             question,
-             prompt_file="question_detector.yaml",
-             output_model=OutputModels.BoolOutput,
-             with_analysis=with_analysis,
-             resp_format="parse",
-             user_prompt=user_prompt,
-             output_lang=output_lang,
-             logprobs=logprobs,
-             top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
-         )
-         return results
-
-     async def generate_question_from_text(
-         self,
-         text: str,
-         output_lang: str | None = None,
-         with_analysis: bool = False,
-         user_prompt: str = "",
-         logprobs: bool = False,
-         top_logprobs: int = 3,
-         max_tokens: int | None = None,
-     ) -> dict[str, str]:
-         results = await self.operator.run(
-             text,
-             prompt_file="question_generator.yaml",
-             output_model=OutputModels.StrOutput,
-             with_analysis=with_analysis,
-             resp_format="parse",
-             user_prompt=user_prompt,
-             output_lang=output_lang,
-             logprobs=logprobs,
-             top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
-         )
-         return results
-
-     async def merge_questions(
-         self,
-         questions: list[str],
-         output_lang: str | None = None,
-         mode: Literal["default", "reason"] = "default",
-         with_analysis: bool = False,
-         user_prompt: str = "",
-         logprobs: bool = False,
-         top_logprobs: int = 3,
-         max_tokens: int | None = None,
-     ) -> dict[str, str]:
-         question_str = ", ".join(questions)
-         results = await self.operator.run(
-             question_str,
-             prompt_file="question_merger.yaml",
-             output_model=OutputModels.StrOutput,
-             with_analysis=with_analysis,
-             use_modes=True,
-             mode=mode,
-             resp_format="parse",
-             user_prompt=user_prompt,
-             output_lang=output_lang,
-             logprobs=logprobs,
-             top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
-         )
-         return results
-
-     async def rewrite(
-         self,
-         question: str,
-         output_lang: str | None = None,
-         mode: Literal["positive", "negative", "hard_negative"] = "positive",
-         with_analysis: bool = False,
-         user_prompt: str = "",
-         logprobs: bool = False,
-         top_logprobs: int = 3,
-         max_tokens: int | None = None,
-     ) -> dict[str, str]:
-         results = await self.operator.run(
-             question,
-             prompt_file="rewriter.yaml",
-             output_model=OutputModels.StrOutput,
-             with_analysis=with_analysis,
-             use_modes=True,
-             mode=mode,
-             resp_format="parse",
-             user_prompt=user_prompt,
-             output_lang=output_lang,
-             logprobs=logprobs,
-             top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
-         )
-         return results
-
-     async def generate_questions_from_subject(
-         self,
-         subject: str,
-         number_of_questions: int,
-         output_lang: str | None = None,
-         with_analysis: bool = False,
-         user_prompt: str = "",
-         logprobs: bool = False,
-         top_logprobs: int = 3,
-         max_tokens: int | None = None,
-     ) -> dict[str, list[str]]:
-         results = await self.operator.run(
-             subject,
-             prompt_file="subject_question_generator.yaml",
-             output_model=OutputModels.ReasonListStrOutput,
-             with_analysis=with_analysis,
-             resp_format="parse",
-             user_prompt=user_prompt,
-             number_of_questions=number_of_questions,
-             output_lang=output_lang,
-             logprobs=logprobs,
-             top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
-         )
-         return results
-
-     async def summarize(
-         self,
-         text: str,
-         output_lang: str | None = None,
-         with_analysis: bool = False,
-         user_prompt: str = "",
-         logprobs: bool = False,
-         top_logprobs: int = 3,
-         max_tokens: int | None = None,
-     ) -> dict[str, str]:
-         results = await self.operator.run(
-             text,
-             prompt_file="summarizer.yaml",
-             output_model=OutputModels.StrOutput,
-             with_analysis=with_analysis,
-             resp_format="parse",
-             user_prompt=user_prompt,
-             output_lang=output_lang,
-             logprobs=logprobs,
-             top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
-         )
-         return results
-
-     async def translate(
-         self,
-         text: str,
-         target_language: str,
-         with_analysis: bool = False,
-         user_prompt: str = "",
-         logprobs: bool = False,
-         top_logprobs: int = 3,
-         max_tokens: int | None = None,
-     ) -> dict[str, str]:
-         results = await self.operator.run(
-             text,
-             prompt_file="translator.yaml",
-             output_model=OutputModels.StrOutput,
-             with_analysis=with_analysis,
-             resp_format="parse",
-             user_prompt=user_prompt,
-             target_language=target_language,
-             logprobs=logprobs,
-             top_logprobs=top_logprobs,
-             max_tokens=max_tokens,
-         )
-         return results
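To complement the README's Quick Start, here is a sketch of the two mode-based methods defined above, `merge_questions` and `rewrite`, using the signatures as they appear in this file (the questions are placeholders, and `the_tool` is an `AsyncTheTool` instance constructed as in the Quick Start):

```python
from texttools import AsyncTheTool

async def demo(the_tool: AsyncTheTool):
    # Merge related questions into one; mode is "default" or "reason"
    merged = await the_tool.merge_questions(
        ["What is TextTools?", "Which LLMs does TextTools work with?"],
        mode="default",
        output_lang="English",
    )
    print(merged["result"])

    # Rewrite a question; mode is "positive", "negative", or "hard_negative"
    rewritten = await the_tool.rewrite(
        "Is this project open source?",
        mode="hard_negative",
    )
    print(rewritten["result"])
```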