webscout-8.2.7-py3-none-any.whl → webscout-8.2.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -249
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Litlogger/__init__.py +67 -0
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +23 -0
- webscout/Litlogger/core/logger.py +165 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +33 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +173 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +249 -0
- webscout/Litlogger/styles/formats.py +458 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +153 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +359 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +228 -0
- webscout/Provider/AISEARCH/genspark_search.py +350 -0
- webscout/Provider/AISEARCH/hika_search.py +198 -0
- webscout/Provider/AISEARCH/iask_search.py +436 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +324 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +673 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +370 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +285 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +36 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +488 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +319 -0
- webscout/Provider/OPENAI/e2b.py +1356 -0
- webscout/Provider/OPENAI/exaai.py +411 -0
- webscout/Provider/OPENAI/exachat.py +443 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +323 -0
- webscout/Provider/OPENAI/groq.py +361 -0
- webscout/Provider/OPENAI/heckai.py +307 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +383 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +356 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/scirachat.py +471 -0
- webscout/Provider/OPENAI/sonus.py +303 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -0
- webscout/Provider/OPENAI/uncovrAI.py +462 -0
- webscout/Provider/OPENAI/utils.py +307 -0
- webscout/Provider/OPENAI/venice.py +425 -0
- webscout/Provider/OPENAI/wisecat.py +381 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +378 -0
- webscout/Provider/OPENAI/yep.py +356 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/ImgSys/__init__.py +23 -0
- webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/__init__.py +12 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +155 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/piclumen/__init__.py +23 -0
- webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
- webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +9 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +280 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +172 -0
- webscout/Provider/ai4chat.py +149 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/scout/README.md +402 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +140 -0
- webscout/scout/core/scout.py +568 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +460 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +262 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
- webscout-8.2.8.dist-info/RECORD +334 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/yep.py (new file)

@@ -0,0 +1,356 @@
import time
import uuid
import cloudscraper # Import cloudscraper
import json
from typing import List, Dict, Optional, Union, Generator, Any

# Import base classes and utility structures
from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
from .utils import (
    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
    ChatCompletionMessage, CompletionUsage, get_system_prompt # Import get_system_prompt
)

# Attempt to import LitAgent, fallback if not available
try:
    from webscout.litagent import LitAgent
except ImportError:
    # Define a dummy LitAgent if webscout is not installed or accessible
    class LitAgent:
        def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
            print("Warning: LitAgent not found. Using default minimal headers.")
            return {
                "accept": "*/*",
                "accept_language": "en-US,en;q=0.9",
                "platform": "Windows",
                "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
                "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
                "browser_type": browser,
            }

# --- YEPCHAT Client ---

# ANSI escape codes for formatting
BOLD = "\033[1m"
RED = "\033[91m"
RESET = "\033[0m"

class Completions(BaseCompletions):
    def __init__(self, client: 'YEPCHAT'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = 1280,
        stream: bool = False,
        temperature: Optional[float] = 0.6,
        top_p: Optional[float] = 0.7,
        system_prompt: Optional[str] = None, # Added for consistency, but will be ignored
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation using YEPCHAT API.
        Mimics openai.chat.completions.create
        Note: YEPCHAT does not support system messages. They will be ignored.
        """
        # Accept both raw and prefixed model names from the user, but always send the raw name to the API
        if model.startswith("YEPCHAT/"):
            model_raw = model.replace("YEPCHAT/", "", 1)
        else:
            model_raw = model
        # Validate model
        if f"YEPCHAT/{model_raw}" not in self._client.AVAILABLE_MODELS:
            raise ValueError(
                f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}"
            )

        # Filter out system messages and warn the user if any are present
        filtered_messages = []
        has_system_message = False
        if get_system_prompt(messages) or system_prompt: # Check both message list and explicit param
            has_system_message = True

        for msg in messages:
            if msg["role"] == "system":
                continue # Skip system messages
            filtered_messages.append(msg)

        if has_system_message:
            # Print warning in bold red
            print(f"{BOLD}{RED}Warning: YEPCHAT does not support system messages, they will be ignored.{RESET}")

        # If no messages left after filtering, raise an error
        if not filtered_messages:
            raise ValueError("At least one user or assistant message is required for YEPCHAT.")

        payload = {
            "stream": stream,
            "max_tokens": max_tokens,
            "top_p": top_p,
            "temperature": temperature,
            "messages": filtered_messages, # Use filtered messages
            "model": model_raw, # Send only the raw model name to the API
        }

        # Add any extra kwargs to the payload
        payload.update(kwargs)

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload)
        else:
            return self._create_non_stream(request_id, created_time, model, payload)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        try:
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                cookies=self._client.cookies,
                json=payload,
                stream=True,
                timeout=self._client.timeout
            )

            if not response.ok:
                raise IOError(
                    f"YEPCHAT API Error: {response.status_code} {response.reason} - {response.text}"
                )

            for line in response.iter_lines(decode_unicode=True):
                if line:
                    line = line.strip()
                    if line.startswith("data: "):
                        json_str = line[6:]
                        if json_str == "[DONE]":
                            break
                        try:
                            data = json.loads(json_str)
                            choice_data = data.get('choices', [{}])[0]
                            delta_data = choice_data.get('delta', {})
                            finish_reason = choice_data.get('finish_reason')
                            content = delta_data.get('content')
                            role = delta_data.get('role', None)

                            if content is not None or role is not None:
                                delta = ChoiceDelta(content=content, role=role)
                                choice = Choice(index=0, delta=delta, finish_reason=finish_reason)
                                chunk = ChatCompletionChunk(
                                    id=request_id,
                                    choices=[choice],
                                    created=created_time,
                                    model=model,
                                )
                                yield chunk

                        except json.JSONDecodeError:
                            print(f"Warning: Could not decode JSON line: {json_str}")
                            continue

            # Yield final chunk with finish reason if not already sent
            delta = ChoiceDelta()
            choice = Choice(index=0, delta=delta, finish_reason="stop")
            chunk = ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
            )
            yield chunk

        except cloudscraper.exceptions.CloudflareChallengeError as e:
            pass

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> ChatCompletion:
        full_response_content = ""
        finish_reason = "stop"
        try:
            # Make a non-streaming request to the API
            payload_copy = payload.copy()
            payload_copy["stream"] = False
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                cookies=self._client.cookies,
                json=payload_copy,
                timeout=self._client.timeout
            )
            if not response.ok:
                raise IOError(
                    f"YEPCHAT API Error: {response.status_code} {response.reason} - {response.text}"
                )
            data = response.json()
            if 'choices' in data and len(data['choices']) > 0:
                # YEPCHAT non-streaming returns message content in choices[0]['message']['content']
                full_response_content = data['choices'][0].get('message', {}).get('content', '')
                finish_reason = data['choices'][0].get('finish_reason', 'stop')
            else:
                full_response_content = ''
                finish_reason = 'stop'
        except Exception as e:
            print(f"Error obtaining non-stream response from YEPCHAT: {e}")
            finish_reason = "error"

        message = ChatCompletionMessage(
            role="assistant",
            content=full_response_content
        )
        choice = Choice(
            index=0,
            message=message,
            finish_reason=finish_reason
        )
        usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
        completion = ChatCompletion(
            id=request_id,
            choices=[choice],
            created=created_time,
            model=model,
            usage=usage,
        )
        return completion

class Chat(BaseChat):
    def __init__(self, client: 'YEPCHAT'):
        self.completions = Completions(client)

class YEPCHAT(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for YEPCHAT API.

    Usage:
        client = YEPCHAT()
        response = client.chat.completions.create(
            model="DeepSeek-R1-Distill-Qwen-32B",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """
    _base_models = ["DeepSeek-R1-Distill-Qwen-32B", "Mixtral-8x7B-Instruct-v0.1"]

    # Create AVAILABLE_MODELS as a list with the format "YEPCHAT/model"
    AVAILABLE_MODELS = [f"YEPCHAT/{model}" for model in _base_models]

    # Create a mapping dictionary for internal use
    _model_mapping = {model: f"YEPCHAT/{model}" for model in _base_models}

    def __init__(
        self,
        timeout: int = 30,
        browser: str = "chrome"
    ):
        """
        Initialize the YEPCHAT client.

        Args:
            timeout: Request timeout in seconds.
            browser: Browser name for LitAgent to generate User-Agent.
        """
        self.timeout = timeout
        self.api_endpoint = "https://api.yep.com/v1/chat/completions"
        self.session = cloudscraper.create_scraper() # Use cloudscraper

        # Initialize LitAgent for user agent generation and fingerprinting
        try:
            agent = LitAgent()
            fingerprint = agent.generate_fingerprint(browser=browser)
        except Exception as e:
            print(f"Warning: Failed to generate fingerprint with LitAgent: {e}. Using fallback.")
            # Fallback fingerprint data
            fingerprint = {
                "accept": "*/*",
                "accept_language": "en-US,en;q=0.9",
                "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
                "platform": "Windows",
                "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
            }

        # Initialize headers using the fingerprint
        self.headers = {
            "Accept": fingerprint["accept"],
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": fingerprint["accept_language"],
            "Content-Type": "application/json; charset=utf-8",
            "DNT": "1",
            "Origin": "https://yep.com",
            "Referer": "https://yep.com/",
            "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
            "User-Agent": fingerprint["user_agent"],
        }
        self.session.headers.update(self.headers)

        # Generate cookies (consider if these need refreshing or specific values)
        self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}

        # Initialize the chat interface
        self.chat = Chat(self)

    @property
    def models(self):
        class _ModelList:
            def list(inner_self):
                return YEPCHAT.AVAILABLE_MODELS
        return _ModelList()

    def convert_model_name(self, model: str) -> str:
        """
        Ensures the model name is valid for YEPCHAT.
        Returns the validated model name or raises an error if invalid.
        """
        if model in self.AVAILABLE_MODELS:
            return model
        else:
            # Raise error instead of defaulting, as model is mandatory in create()
            raise ValueError(f"Model '{model}' not supported by YEPCHAT. Available: {self.AVAILABLE_MODELS}")

# Example usage (optional, for testing)
if __name__ == '__main__':
    print("Testing YEPCHAT OpenAI-Compatible Client...")

    # Test Non-Streaming
    try:
        print("\n--- Non-Streaming Test (DeepSeek) ---")
        client = YEPCHAT()
        response = client.chat.completions.create(
            model="DeepSeek-R1-Distill-Qwen-32B",
            messages=[
                {"role": "user", "content": "Say 'Hello World'"}
            ],
            stream=False
        )
        print("Response:", response.choices[0].message.content)
        print("Usage:", response.usage) # Will show 0 tokens
    except Exception as e:
        print(f"Non-Streaming Test Failed: {e}")

    # Test Streaming
    try:
        print("\n--- Streaming Test (Mixtral) ---")
        client_stream = YEPCHAT()
        stream = client_stream.chat.completions.create(
            model="Mixtral-8x7B-Instruct-v0.1",
            messages=[
                {"role": "user", "content": "Write a short sentence about AI."}
            ],
            stream=True
        )
        print("Streaming Response:")
        for chunk in stream:
            content = chunk.choices[0].delta.content
            if content:
                print(content, end="", flush=True)
        print() # Add a newline at the end

    except Exception as e:
        print(f"Streaming Test Failed: {e}")
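
For orientation only (this example is not part of the diff), here is a minimal sketch of how the new OpenAI-compatible YEPCHAT client above can be exercised. The import path is assumed from the file list (webscout/Provider/OPENAI/yep.py). Note that AVAILABLE_MODELS carries a "YEPCHAT/" prefix, while create() accepts either the prefixed or the raw model name and always sends the raw name to the API.

from webscout.Provider.OPENAI.yep import YEPCHAT  # import path assumed from the file list above

client = YEPCHAT(timeout=30)

# models.list() returns the prefixed names, e.g. "YEPCHAT/Mixtral-8x7B-Instruct-v0.1"
print(client.models.list())

# Either the prefixed or the raw model name works; the prefix is stripped before the request
response = client.chat.completions.create(
    model="YEPCHAT/Mixtral-8x7B-Instruct-v0.1",
    messages=[{"role": "user", "content": "Write one sentence about the sea."}],
)
print(response.choices[0].message.content)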
webscout/Provider/OpenGPT.py (new file)

@@ -0,0 +1,209 @@
from curl_cffi.requests import Session
from curl_cffi import CurlError
import json
from typing import Dict, Generator, Union

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts, sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent


class OpenGPT(Provider):
    """
    A class to interact with the Open-GPT API.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600, # Note: max_tokens is not used by this API
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        app_id: str = "clf3yg8730000ih08ndbdi2v4",
    ):
        """Initializes the OpenGPT API client.

        Args:
            is_conversation (bool, optional): Whether to maintain conversation history. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to generate. Defaults to 600.
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): Http request proxies. Defaults to {}.
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
            app_id (str, optional): The OpenGPT application ID. Defaults to "clf3yg8730000ih08ndbdi2v4".
        """
        # Initialize curl_cffi Session
        self.session = Session()
        self.agent = LitAgent() # Keep for potential future use or other headers

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.app_id = app_id

        # Set up headers (remove User-Agent if using impersonate)
        self.headers = {
            "Content-Type": "application/json",
            # "User-Agent": self.agent.random(), # Removed, handled by impersonate
            "Referer": f"https://open-gpt.app/id/app/{self.app_id}",
            # Add sec-ch-ua headers if needed for impersonation consistency
        }

        # Update curl_cffi session headers and proxies
        self.session.headers.update(self.headers)
        self.session.proxies = proxies # Assign proxies directly

        # Initialize optimizers
        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        # Setup conversation
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            ) if act else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def ask(
        self,
        prompt: str,
        stream: bool = False, # Note: API does not support streaming
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict, Generator]:
        """
        Send a prompt to the OpenGPT API and get a response.

        Args:
            prompt: The user input/prompt for the API.
            stream: Whether to stream the response.
            raw: Whether to return the raw API response.
            optimizer: Optimizer to use on the prompt.
            conversationally: Whether to apply the optimizer on the full conversation prompt.

        Returns:
            A dictionary or generator with the response.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)

        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Prepare the request body payload
        payload = {
            "userInput": conversation_prompt,
            "id": self.app_id,
            "userKey": "" # Assuming userKey is meant to be empty as in the original code
        }

        # API does not stream, implement non-stream logic directly
        def for_non_stream():
            try:
                # Use curl_cffi session post with impersonate
                response = self.session.post(
                    "https://open-gpt.app/api/generate",
                    # headers are set on the session
                    data=json.dumps(payload), # Keep data as JSON string
                    timeout=self.timeout,
                    # proxies are set on the session
                    impersonate="chrome110" # Use a common impersonation profile
                )

                response.raise_for_status() # Check for HTTP errors

                # Use response.text which is already decoded
                response_text = response.text
                self.last_response = {"text": response_text}
                self.conversation.update_chat_history(prompt, response_text)

                # Return dict or raw string based on raw flag
                return {"raw": response_text} if raw else {"text": response_text}

            except CurlError as e: # Catch CurlError
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
            except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e

        # This provider doesn't support streaming, so just return non-stream
        return for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False, # Keep stream param for interface consistency
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """
        Send a prompt to the OpenGPT API and get a text response.

        Args:
            prompt: The user input/prompt for the API.
            stream: Whether to stream the response (not supported).
            optimizer: Optimizer to use on the prompt.
            conversationally: Whether to apply the optimizer on the full conversation prompt.

        Returns:
            A string with the response text.
        """
        # Since ask() now handles both stream=True/False by returning the full response dict:
        response_data = self.ask(
            prompt,
            stream=False, # Call ask in non-stream mode internally
            raw=False, # Ensure ask returns dict with 'text' key
            optimizer=optimizer,
            conversationally=conversationally
        )
        # If stream=True was requested, simulate streaming by yielding the full message at once
        if stream:
            def stream_wrapper():
                yield self.get_message(response_data) # yield only the text string
            return stream_wrapper()
        else:
            # If stream=False, return the full message directly
            return self.get_message(response_data)

    def get_message(self, response: dict) -> str:
        """
        Extract the message from the response dictionary.

        Args:
            response: Response dictionary from the ask method.

        Returns:
            The text response as a string.
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]


if __name__ == "__main__":
    ai = OpenGPT()
    print(ai.chat("Hello, how are you?"))
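
Likewise, a short usage sketch for the new OpenGPT provider above (illustrative only; import path assumed from the file list, webscout/Provider/OpenGPT.py). Because the underlying API does not stream, chat(..., stream=True) returns a generator that yields the complete reply as a single item.

from webscout.Provider.OpenGPT import OpenGPT  # import path assumed from the file list above

ai = OpenGPT(timeout=30)

# Non-streaming: returns the reply as a plain string
print(ai.chat("Hello, how are you?"))

# "Streaming": a single-item generator wrapping the same full reply
for text in ai.chat("Tell me a fact about Python.", stream=True):
    print(text)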