webscout-8.2.7-py3-none-any.whl → webscout-8.2.8-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -249
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Litlogger/__init__.py +67 -0
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +23 -0
- webscout/Litlogger/core/logger.py +165 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +33 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +173 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +249 -0
- webscout/Litlogger/styles/formats.py +458 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +153 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +359 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +228 -0
- webscout/Provider/AISEARCH/genspark_search.py +350 -0
- webscout/Provider/AISEARCH/hika_search.py +198 -0
- webscout/Provider/AISEARCH/iask_search.py +436 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +324 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +673 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +370 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +285 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +36 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +488 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +319 -0
- webscout/Provider/OPENAI/e2b.py +1356 -0
- webscout/Provider/OPENAI/exaai.py +411 -0
- webscout/Provider/OPENAI/exachat.py +443 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +323 -0
- webscout/Provider/OPENAI/groq.py +361 -0
- webscout/Provider/OPENAI/heckai.py +307 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +383 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +356 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/scirachat.py +471 -0
- webscout/Provider/OPENAI/sonus.py +303 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -0
- webscout/Provider/OPENAI/uncovrAI.py +462 -0
- webscout/Provider/OPENAI/utils.py +307 -0
- webscout/Provider/OPENAI/venice.py +425 -0
- webscout/Provider/OPENAI/wisecat.py +381 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +378 -0
- webscout/Provider/OPENAI/yep.py +356 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/ImgSys/__init__.py +23 -0
- webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/__init__.py +12 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +155 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/piclumen/__init__.py +23 -0
- webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
- webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +9 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +280 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +172 -0
- webscout/Provider/ai4chat.py +149 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/scout/README.md +402 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +140 -0
- webscout/scout/core/scout.py +568 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +460 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +262 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
- webscout-8.2.8.dist-info/RECORD +334 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
webscout/Provider/x0gpt.py
ADDED
@@ -0,0 +1,299 @@
+from typing import Optional, Union, Any, Dict
+from uuid import uuid4
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
+import re
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+# Import HTTPVersion enum
+from curl_cffi.const import CurlHttpVersion
+
+class X0GPT(Provider):
+    """
+    A class to interact with the x0-gpt.devwtf.in API.
+
+    Attributes:
+        system_prompt (str): The system prompt to define the assistant's role.
+
+    Examples:
+        >>> from webscout.Provider.x0gpt import X0GPT
+        >>> ai = X0GPT()
+        >>> response = ai.chat("What's the weather today?")
+        >>> print(response)
+        'The weather today is sunny with a high of 75°F.'
+    """
+    AVAILABLE_MODELS = ["UNKNOWN"]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful assistant.",
+        model: str = "UNKNOWN"
+    ):
+        """
+        Initializes the X0GPT API with given parameters.
+
+        Args:
+            is_conversation (bool): Whether the provider is in conversation mode.
+            max_tokens (int): Maximum number of tokens to sample.
+            timeout (int): Timeout for API requests.
+            intro (str): Introduction message for the conversation.
+            filepath (str): Filepath for storing conversation history.
+            update_file (bool): Whether to update the conversation history file.
+            proxies (dict): Proxies for the API requests.
+            history_offset (int): Offset for conversation history.
+            act (str): Act for the conversation.
+            system_prompt (str): The system prompt to define the assistant's role.
+
+        Examples:
+            >>> ai = X0GPT(system_prompt="You are a friendly assistant.")
+            >>> print(ai.system_prompt)
+            'You are a friendly assistant.'
+        """
+        # Initialize curl_cffi Session instead of requests.Session
+        self.session = Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://x0-gpt.devwtf.in/api/stream/reply"
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+
+        self.headers = {
+            "authority": "x0-gpt.devwtf.in",
+            "method": "POST",
+            "path": "/api/stream/reply",
+            "scheme": "https",
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd", # Keep zstd for now
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            # "content-length": "114", # Let curl_cffi handle content-length
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://x0-gpt.devwtf.in",
+            # "priority": "u=1, i", # Remove priority header
+            "referer": "https://x0-gpt.devwtf.in/chat",
+            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "user-agent": self.agent.random()
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    @staticmethod
+    def _x0gpt_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the x0gpt stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"', chunk)
+            if match:
+                # Decode potential unicode escapes like \u00e9
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes and quotes
+        return None
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Sends a prompt to the x0-gpt.devwtf.in API and returns the response.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            raw (bool): Whether to return the raw response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            Dict[str, Any]: The API response.
+
+        Examples:
+            >>> ai = X0GPT()
+            >>> response = ai.ask("Tell me a joke!")
+            >>> print(response)
+            {'text': 'Why did the scarecrow win an award? Because he was outstanding in his field!'}
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "chatId": uuid4().hex,
+            "namespace": None
+        }
+
+        def for_stream():
+            try:
+                # Use curl_cffi session post with updated impersonate and http_version
+                response = self.session.post(
+                    self.api_endpoint,
+                    headers=self.headers,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome120", # Try a different impersonation profile
+                    http_version=CurlHttpVersion.V1_1 # Force HTTP/1.1
+                )
+                if not response.ok:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
+
+                streaming_response = ""
+                # Use sanitize_stream with the custom extractor
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No simple prefix to remove here
+                    to_json=False, # Content is not JSON
+                    content_extractor=self._x0gpt_extractor # Use the specific extractor
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_response += content_chunk
+                        yield content_chunk if raw else dict(text=content_chunk)
+
+                self.last_response.update(dict(text=streaming_response))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e: # Catch other potential exceptions
+                # Include the original exception type in the message for clarity
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
+
+        def for_non_stream():
+            # This function implicitly uses the updated for_stream
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """
+        Generates a response from the X0GPT API.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            str: The API response.
+
+        Examples:
+            >>> ai = X0GPT()
+            >>> response = ai.chat("What's the weather today?")
+            >>> print(response)
+            'The weather today is sunny with a high of 75°F.'
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts the message from the API response.
+
+        Args:
+            response (dict): The API response.
+
+        Returns:
+            str: The message content.
+
+        Examples:
+            >>> ai = X0GPT()
+            >>> response = ai.ask("Tell me a joke!")
+            >>> message = ai.get_message(response)
+            >>> print(message)
+            'Why did the scarecrow win an award? Because he was outstanding in his field!'
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        # Ensure text exists before processing
+        text = response.get("text", "")
+        # Formatting is now mostly handled by the extractor, just return
+        formatted_text = text
+        return formatted_text
+
+if __name__ == "__main__":
+    from rich import print
+    ai = X0GPT(timeout=5000)
+    response = ai.chat("write a poem about AI", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
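
For orientation, here is a minimal usage sketch of the new X0GPT provider (an editor's illustration, not part of the diff; it mirrors the module's own __main__ block and assumes webscout 8.2.8 is installed with network access to x0-gpt.devwtf.in):

from webscout.Provider.x0gpt import X0GPT

ai = X0GPT(timeout=30)

# Non-streaming: chat() collects the whole reply and returns it as a string.
print(ai.chat("Say hello in one word"))

# Streaming: chat(..., stream=True) yields text chunks that _x0gpt_extractor
# pulls out of the provider's '0:"..."' wire format via sanitize_stream.
for chunk in ai.chat("write a poem about AI", stream=True):
    print(chunk, end="", flush=True)

Note that ask() yields dicts like {'text': ...}, which chat() unwraps through get_message().
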
webscout/Provider/yep.py
ADDED
@@ -0,0 +1,389 @@
+import uuid
+import json
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
+
+from typing import Any, Dict, Optional, Generator, Union, List, TypeVar
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+from webscout.conversation import Conversation, Fn
+
+T = TypeVar('T')
+
+
+class YEPCHAT(Provider):
+    """
+    YEPCHAT is a provider class for interacting with the Yep API.
+
+    Attributes:
+        AVAILABLE_MODELS (list): List of available models for the provider.
+    """
+
+    AVAILABLE_MODELS = ["DeepSeek-R1-Distill-Qwen-32B", "Mixtral-8x7B-Instruct-v0.1"]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 1280,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "DeepSeek-R1-Distill-Qwen-32B",
+        temperature: float = 0.6,
+        top_p: float = 0.7,
+        browser: str = "chrome",
+        tools: Optional[List[Fn]] = None
+    ):
+        """
+        Initializes the YEPCHAT provider with the specified parameters.
+
+        Examples:
+            >>> ai = YEPCHAT()
+            >>> ai.ask("What's the weather today?")
+            Sends a prompt to the Yep API and returns the response.
+
+            >>> ai.chat("Tell me a joke", stream=True)
+            Initiates a chat with the Yep API using the provided prompt.
+
+            >>> weather_tool = Fn(name="get_weather", description="Get the current weather", parameters={"location": "string"})
+            >>> ai = YEPCHAT(tools=[weather_tool])
+            >>> ai.chat("What's the weather in New York?")
+            Uses the weather tool to provide weather information.
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(
+                f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
+            )
+
+        # Initialize curl_cffi Session instead of cloudscraper
+        self.session = Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.chat_endpoint = "https://api.yep.com/v1/chat/completions"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.temperature = temperature
+        self.top_p = top_p
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+        # Use fingerprinting to create a consistent browser identity
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Use the fingerprint for headers
+        self.headers = {
+            "Accept": self.fingerprint["accept"],
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Content-Type": "application/json; charset=utf-8",
+            "DNT": "1",
+            "Origin": "https://yep.com",
+            "Referer": "https://yep.com/",
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+        }
+
+        # Create session cookies with unique identifiers
+        self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method))
+            and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file, tools=tools
+        )
+        self.conversation.history_offset = history_offset
+        # Set consistent headers and proxies for the curl_cffi session
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+        # Note: curl_cffi handles cookies differently, passed directly in requests
+
+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept": self.fingerprint["accept"],
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+        })
+
+        # Update session headers
+        self.session.headers.update(self.headers)
+
+        # Generate new cookies (will be passed in requests)
+        self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
+
+        return self.fingerprint
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        """
+        Sends a prompt to the Yep API and returns the response.
+        Now supports tool calling functionality.
+
+        Examples:
+            >>> ai = YEPCHAT()
+            >>> ai.ask("What's the weather today?")
+            Returns the response from the Yep API.
+
+            >>> ai.ask("Tell me a joke", stream=True)
+            Streams the response from the Yep API.
+
+            >>> weather_tool = Fn(name="get_weather", description="Get the current weather", parameters={"location": "string"})
+            >>> ai = YEPCHAT(tools=[weather_tool])
+            >>> ai.ask("What's the weather in New York?")
+            Will use the weather tool to provide response.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        data = {
+            "stream": stream,
+            "max_tokens": self.max_tokens_to_sample,
+            "top_p": self.top_p,
+            "temperature": self.temperature,
+            "messages": [{"content": conversation_prompt, "role": "user"}],
+            "model": self.model,
+        }
+
+        def for_stream():
+            try:
+                # buffer = b"" # No longer needed here
+                # Use curl_cffi session post, pass cookies explicitly
+                response = self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout, impersonate=self.fingerprint.get("browser_type", "chrome110"))
+
+                if not response.ok:
+                    # If we get a non-200 response, try refreshing our identity once
+                    if response.status_code in [403, 429]:
+                        self.refresh_identity()
+                        # Retry with new identity
+                        # Use curl_cffi session post, pass cookies explicitly
+                        retry_response = self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout, impersonate=self.fingerprint.get("browser_type", "chrome110"))
+                        if not retry_response.ok:
+                            raise exceptions.FailedToGenerateResponseError(
+                                f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
+                            )
+                        response = retry_response # Use the successful retry response
+                    else:
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                        )
+
+                # --- Start of stream processing block (should be outside the 'if not response.ok' block) ---
+                streaming_text = ""
+
+                # Use sanitize_stream to process the lines
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass the byte iterator directly
+                    intro_value="data:",
+                    to_json=True, # Yep sends JSON after 'data:'
+                    skip_markers=["[DONE]"], # Skip the final marker
+                    yield_raw_on_error=False, # Only process valid JSON data
+                    # --- Add the content extractor ---
+                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None
+                )
+                # The loop now yields the final extracted string content directly
+                for content_chunk in processed_stream:
+                    # --- TEMPORARY DEBUG PRINT ---
+                    # print(f"\nDEBUG: Received extracted content: {content_chunk!r}\n", flush=True) # Keep or remove debug print as needed
+                    if content_chunk and isinstance(content_chunk, str): # Ensure it's a non-empty string
+                        streaming_text += content_chunk
+                        # Yield dict or raw string chunk based on 'raw' flag
+                        yield dict(text=content_chunk) if not raw else content_chunk
+                # --- End of stream processing block ---
+
+                # Check if the response contains a tool call (This should happen *after* processing the stream)
+                response_data = self.conversation.handle_tool_response(streaming_text)
+
+                if response_data["is_tool_call"]:
+                    # Handle tool call results
+                    if response_data["success"]:
+                        for tool_call in response_data.get("tool_calls", []):
+                            tool_name = tool_call.get("name", "unknown_tool")
+                            result = response_data["result"]
+                            self.conversation.update_chat_history_with_tool(prompt, tool_name, result)
+                    else:
+                        # If tool call failed, update history with error
+                        self.conversation.update_chat_history(prompt,
+                            f"Error executing tool call: {response_data['result']}")
+                else:
+                    # Normal response handling
+                    self.conversation.update_chat_history(prompt, streaming_text)
+
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        def for_non_stream():
+            try:
+                # Use curl_cffi session post, pass cookies explicitly
+                response = self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, timeout=self.timeout, impersonate=self.fingerprint.get("browser_type", "chrome110"))
+                if not response.ok:
+                    if response.status_code in [403, 429]:
+                        self.refresh_identity()
+                        # Use curl_cffi session post, pass cookies explicitly
+                        response = self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, timeout=self.timeout, impersonate=self.fingerprint.get("browser_type", "chrome110"))
+                        if not response.ok:
+                            raise exceptions.FailedToGenerateResponseError(
+                                f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
+                            )
+                    else:
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                        )
+
+                # ... existing non-stream response handling code ...
+                response_data = response.json()
+                if 'choices' in response_data and len(response_data['choices']) > 0:
+                    content = response_data['choices'][0].get('message', {}).get('content', '')
+
+                    # Check if the response contains a tool call
+                    tool_response = self.conversation.handle_tool_response(content)
+
+                    if tool_response["is_tool_call"]:
+                        # Process tool call
+                        if tool_response["success"]:
+                            # Get the first tool call for simplicity
+                            if "tool_calls" in tool_response and len(tool_response["tool_calls"]) > 0:
+                                tool_call = tool_response["tool_calls"][0]
+                                tool_name = tool_call.get("name", "unknown_tool")
+                                tool_result = tool_response["result"]
+
+                                # Update chat history with tool call
+                                self.conversation.update_chat_history_with_tool(prompt, tool_name, tool_result)
+
+                                # Return tool result
+                                return {"text": tool_result, "is_tool_call": True, "tool_name": tool_name}
+
+                        # If tool call processing failed
+                        return {"text": tool_response["result"], "is_tool_call": True, "error": True}
+                    else:
+                        # Normal response handling
+                        self.conversation.update_chat_history(prompt, content)
+                        return {"text": content}
+                else:
+                    raise exceptions.FailedToGenerateResponseError("No response content found")
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        """
+        Initiates a chat with the Yep API using the provided prompt.
+
+        Examples:
+            >>> ai = YEPCHAT()
+            >>> ai.chat("Tell me a joke")
+            Returns the chat response from the Yep API.
+
+            >>> ai.chat("What's the weather today?", stream=True)
+            Streams the chat response from the Yep API.
+        """
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts the message content from the API response.
+
+        Examples:
+            >>> ai = YEPCHAT()
+            >>> response = ai.ask("Tell me a joke")
+            >>> ai.get_message(response)
+            Extracts and returns the message content from the response.
+        """
+        assert isinstance(response, dict)
+        return response["text"]
+
+
+if __name__ == "__main__":
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in YEPCHAT.AVAILABLE_MODELS:
+        try:
+            test_ai = YEPCHAT(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")
+
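
Likewise, a hedged sketch of the new YEPCHAT provider, including its optional tool-calling path (an editor's illustration built from the docstring examples above; the Fn signature and the get_weather tool come from those examples, and the executor wiring inside webscout.conversation is assumed to behave as the diff suggests):

from webscout.Provider.yep import YEPCHAT
from webscout.conversation import Fn

# Plain chat: a 403/429 response triggers refresh_identity() and one retry.
ai = YEPCHAT(model="Mixtral-8x7B-Instruct-v0.1")
print(ai.chat("Tell me a joke"))

# Tool calling: declare a tool with the Fn shape shown in the docstring;
# handle_tool_response() routes any tool call the model emits into history.
weather_tool = Fn(name="get_weather", description="Get the current weather", parameters={"location": "string"})
ai_with_tools = YEPCHAT(tools=[weather_tool])
print(ai_with_tools.chat("What's the weather in New York?"))

Note the design choice shared by both new providers: streaming and non-streaming paths are unified, with ask() yielding {'text': ...} dicts and chat() unwrapping them via get_message().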