webscout 8.2.6__py3-none-any.whl → 8.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +97 -87
- webscout/version.py +1 -1
- {webscout-8.2.6.dist-info → webscout-8.2.7.dist-info}/METADATA +2 -15
- webscout-8.2.7.dist-info/RECORD +26 -0
- {webscout-8.2.6.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/entry_points.txt +3 -0
- webscout-8.2.7.dist-info/top_level.txt +1 -0
- webscout/Extra/GitToolkit/__init__.py +0 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
- webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
- webscout/Extra/GitToolkit/gitapi/user.py +0 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
- webscout/Extra/YTToolkit/YTdownloader.py +0 -957
- webscout/Extra/YTToolkit/__init__.py +0 -3
- webscout/Extra/YTToolkit/transcriber.py +0 -476
- webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
- webscout/Extra/YTToolkit/ytapi/https.py +0 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
- webscout/Extra/YTToolkit/ytapi/query.py +0 -40
- webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
- webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
- webscout/Extra/YTToolkit/ytapi/video.py +0 -232
- webscout/Extra/__init__.py +0 -7
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -910
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.py +0 -684
- webscout/Extra/tempmail/__init__.py +0 -28
- webscout/Extra/tempmail/async_utils.py +0 -141
- webscout/Extra/tempmail/base.py +0 -161
- webscout/Extra/tempmail/cli.py +0 -187
- webscout/Extra/tempmail/emailnator.py +0 -84
- webscout/Extra/tempmail/mail_tm.py +0 -361
- webscout/Extra/tempmail/temp_mail_io.py +0 -292
- webscout/Extra/weather.py +0 -194
- webscout/Extra/weather_ascii.py +0 -76
- webscout/Litlogger/__init__.py +0 -67
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -250
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/AISEARCH/Perplexity.py +0 -359
- webscout/Provider/AISEARCH/__init__.py +0 -10
- webscout/Provider/AISEARCH/felo_search.py +0 -228
- webscout/Provider/AISEARCH/genspark_search.py +0 -208
- webscout/Provider/AISEARCH/hika_search.py +0 -198
- webscout/Provider/AISEARCH/iask_search.py +0 -436
- webscout/Provider/AISEARCH/monica_search.py +0 -246
- webscout/Provider/AISEARCH/scira_search.py +0 -322
- webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
- webscout/Provider/Aitopia.py +0 -316
- webscout/Provider/AllenAI.py +0 -447
- webscout/Provider/Andi.py +0 -228
- webscout/Provider/Blackboxai.py +0 -229
- webscout/Provider/ChatGPTClone.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/ChatSandbox.py +0 -342
- webscout/Provider/Cloudflare.py +0 -325
- webscout/Provider/Cohere.py +0 -208
- webscout/Provider/Deepinfra.py +0 -338
- webscout/Provider/ElectronHub.py +0 -773
- webscout/Provider/ExaAI.py +0 -261
- webscout/Provider/ExaChat.py +0 -358
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -249
- webscout/Provider/Gemini.py +0 -169
- webscout/Provider/GithubChat.py +0 -370
- webscout/Provider/GizAI.py +0 -285
- webscout/Provider/Glider.py +0 -222
- webscout/Provider/Groq.py +0 -801
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HeckAI.py +0 -257
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Jadve.py +0 -291
- webscout/Provider/Koboldai.py +0 -381
- webscout/Provider/LambdaChat.py +0 -411
- webscout/Provider/Llama3.py +0 -259
- webscout/Provider/MCPCore.py +0 -315
- webscout/Provider/Marcus.py +0 -206
- webscout/Provider/Nemotron.py +0 -218
- webscout/Provider/Netwrck.py +0 -270
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/__init__.py +0 -28
- webscout/Provider/OPENAI/ai4chat.py +0 -286
- webscout/Provider/OPENAI/base.py +0 -46
- webscout/Provider/OPENAI/c4ai.py +0 -367
- webscout/Provider/OPENAI/chatgpt.py +0 -549
- webscout/Provider/OPENAI/chatgptclone.py +0 -481
- webscout/Provider/OPENAI/deepinfra.py +0 -309
- webscout/Provider/OPENAI/e2b.py +0 -1350
- webscout/Provider/OPENAI/exaai.py +0 -404
- webscout/Provider/OPENAI/exachat.py +0 -437
- webscout/Provider/OPENAI/freeaichat.py +0 -352
- webscout/Provider/OPENAI/glider.py +0 -316
- webscout/Provider/OPENAI/groq.py +0 -354
- webscout/Provider/OPENAI/heckai.py +0 -341
- webscout/Provider/OPENAI/llmchatco.py +0 -327
- webscout/Provider/OPENAI/mcpcore.py +0 -376
- webscout/Provider/OPENAI/multichat.py +0 -368
- webscout/Provider/OPENAI/netwrck.py +0 -350
- webscout/Provider/OPENAI/opkfc.py +0 -488
- webscout/Provider/OPENAI/scirachat.py +0 -462
- webscout/Provider/OPENAI/sonus.py +0 -294
- webscout/Provider/OPENAI/standardinput.py +0 -425
- webscout/Provider/OPENAI/textpollinations.py +0 -329
- webscout/Provider/OPENAI/toolbaz.py +0 -406
- webscout/Provider/OPENAI/typegpt.py +0 -346
- webscout/Provider/OPENAI/uncovrAI.py +0 -455
- webscout/Provider/OPENAI/utils.py +0 -211
- webscout/Provider/OPENAI/venice.py +0 -413
- webscout/Provider/OPENAI/wisecat.py +0 -381
- webscout/Provider/OPENAI/writecream.py +0 -156
- webscout/Provider/OPENAI/x0gpt.py +0 -371
- webscout/Provider/OPENAI/yep.py +0 -327
- webscout/Provider/OpenGPT.py +0 -209
- webscout/Provider/Openai.py +0 -496
- webscout/Provider/PI.py +0 -429
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/QwenLM.py +0 -254
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -290
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/__init__.py +0 -12
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/TTS/__init__.py +0 -8
- webscout/Provider/TTS/base.py +0 -159
- webscout/Provider/TTS/deepgram.py +0 -156
- webscout/Provider/TTS/elevenlabs.py +0 -111
- webscout/Provider/TTS/gesserit.py +0 -128
- webscout/Provider/TTS/murfai.py +0 -113
- webscout/Provider/TTS/parler.py +0 -111
- webscout/Provider/TTS/speechma.py +0 -180
- webscout/Provider/TTS/streamElements.py +0 -333
- webscout/Provider/TTS/utils.py +0 -280
- webscout/Provider/TeachAnything.py +0 -233
- webscout/Provider/TextPollinationsAI.py +0 -306
- webscout/Provider/TwoAI.py +0 -280
- webscout/Provider/TypliAI.py +0 -305
- webscout/Provider/Venice.py +0 -258
- webscout/Provider/VercelAI.py +0 -253
- webscout/Provider/WiseCat.py +0 -233
- webscout/Provider/WrDoChat.py +0 -370
- webscout/Provider/Writecream.py +0 -237
- webscout/Provider/WritingMate.py +0 -269
- webscout/Provider/Youchat.py +0 -330
- webscout/Provider/__init__.py +0 -178
- webscout/Provider/ai4chat.py +0 -203
- webscout/Provider/aimathgpt.py +0 -189
- webscout/Provider/akashgpt.py +0 -335
- webscout/Provider/asksteve.py +0 -212
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/cerebras.py +0 -288
- webscout/Provider/chatglm.py +0 -215
- webscout/Provider/cleeai.py +0 -213
- webscout/Provider/copilot.py +0 -425
- webscout/Provider/elmo.py +0 -283
- webscout/Provider/freeaichat.py +0 -285
- webscout/Provider/geminiapi.py +0 -208
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/Provider/granite.py +0 -235
- webscout/Provider/hermes.py +0 -266
- webscout/Provider/julius.py +0 -223
- webscout/Provider/koala.py +0 -268
- webscout/Provider/learnfastai.py +0 -325
- webscout/Provider/llama3mitril.py +0 -215
- webscout/Provider/llmchat.py +0 -255
- webscout/Provider/llmchatco.py +0 -306
- webscout/Provider/meta.py +0 -798
- webscout/Provider/multichat.py +0 -364
- webscout/Provider/scira_chat.py +0 -297
- webscout/Provider/scnet.py +0 -243
- webscout/Provider/searchchat.py +0 -292
- webscout/Provider/sonus.py +0 -258
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/toolbaz.py +0 -353
- webscout/Provider/turboseek.py +0 -266
- webscout/Provider/typefully.py +0 -330
- webscout/Provider/typegpt.py +0 -289
- webscout/Provider/uncovr.py +0 -368
- webscout/Provider/x0gpt.py +0 -299
- webscout/Provider/yep.py +0 -389
- webscout/litagent/__init__.py +0 -29
- webscout/litagent/agent.py +0 -455
- webscout/litagent/constants.py +0 -60
- webscout/litprinter/__init__.py +0 -59
- webscout/scout/__init__.py +0 -8
- webscout/scout/core/__init__.py +0 -7
- webscout/scout/core/crawler.py +0 -140
- webscout/scout/core/scout.py +0 -568
- webscout/scout/core/search_result.py +0 -96
- webscout/scout/core/text_analyzer.py +0 -63
- webscout/scout/core/text_utils.py +0 -277
- webscout/scout/core/web_analyzer.py +0 -52
- webscout/scout/core.py +0 -881
- webscout/scout/element.py +0 -460
- webscout/scout/parsers/__init__.py +0 -69
- webscout/scout/parsers/html5lib_parser.py +0 -172
- webscout/scout/parsers/html_parser.py +0 -236
- webscout/scout/parsers/lxml_parser.py +0 -178
- webscout/scout/utils.py +0 -37
- webscout/swiftcli/__init__.py +0 -95
- webscout/swiftcli/core/__init__.py +0 -7
- webscout/swiftcli/core/cli.py +0 -297
- webscout/swiftcli/core/context.py +0 -104
- webscout/swiftcli/core/group.py +0 -241
- webscout/swiftcli/decorators/__init__.py +0 -28
- webscout/swiftcli/decorators/command.py +0 -221
- webscout/swiftcli/decorators/options.py +0 -220
- webscout/swiftcli/decorators/output.py +0 -252
- webscout/swiftcli/exceptions.py +0 -21
- webscout/swiftcli/plugins/__init__.py +0 -9
- webscout/swiftcli/plugins/base.py +0 -135
- webscout/swiftcli/plugins/manager.py +0 -262
- webscout/swiftcli/utils/__init__.py +0 -59
- webscout/swiftcli/utils/formatting.py +0 -252
- webscout/swiftcli/utils/parsing.py +0 -267
- webscout/zeroart/__init__.py +0 -55
- webscout/zeroart/base.py +0 -60
- webscout/zeroart/effects.py +0 -99
- webscout/zeroart/fonts.py +0 -816
- webscout-8.2.6.dist-info/RECORD +0 -307
- webscout-8.2.6.dist-info/entry_points.txt +0 -3
- webscout-8.2.6.dist-info/top_level.txt +0 -2
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- {webscout-8.2.6.dist-info → webscout-8.2.7.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/OPENAI/x0gpt.py
DELETED
@@ -1,371 +0,0 @@
-import time
-import uuid
-import requests
-import re
-import json
-from typing import List, Dict, Optional, Union, Generator, Any
-
-# Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
-    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage
-)
-
-# Import LitAgent
-from webscout.litagent import LitAgent
-
-# --- X0GPT Client ---
-
-class Completions(BaseCompletions):
-    def __init__(self, client: 'X0GPT'):
-        self._client = client
-
-    def create(
-        self,
-        *,
-        model: str,
-        messages: List[Dict[str, str]],
-        max_tokens: Optional[int] = 2049,
-        stream: bool = False,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        **kwargs: Any
-    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-        """
-        Creates a model response for the given chat conversation.
-        Mimics openai.chat.completions.create
-        """
-        # Prepare the payload for X0GPT API
-        payload = {
-            "messages": messages,
-            "chatId": uuid.uuid4().hex,
-            "namespace": None
-        }
-
-        # Add optional parameters if provided
-        if max_tokens is not None and max_tokens > 0:
-            payload["max_tokens"] = max_tokens
-
-        if temperature is not None:
-            payload["temperature"] = temperature
-
-        if top_p is not None:
-            payload["top_p"] = top_p
-
-        # Add any additional parameters
-        payload.update(kwargs)
-
-        request_id = f"chatcmpl-{uuid.uuid4()}"
-        created_time = int(time.time())
-
-        if stream:
-            return self._create_stream(request_id, created_time, model, payload)
-        else:
-            return self._create_non_stream(request_id, created_time, model, payload)
-
-    def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> Generator[ChatCompletionChunk, None, None]:
-        try:
-            response = self._client.session.post(
-                self._client.api_endpoint,
-                headers=self._client.headers,
-                json=payload,
-                stream=True,
-                timeout=self._client.timeout
-            )
-
-            # Handle non-200 responses
-            if not response.ok:
-                raise IOError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            # Track token usage across chunks
-            prompt_tokens = 0
-            completion_tokens = 0
-            total_tokens = 0
-
-            # Estimate prompt tokens based on message length
-            for msg in payload.get("messages", []):
-                prompt_tokens += len(msg.get("content", "").split())
-
-            for line in response.iter_lines():
-                if line:
-                    decoded_line = line.decode('utf-8').strip()
-
-                    # X0GPT uses a different format, so we need to extract the content
-                    match = re.search(r'0:"(.*?)"', decoded_line)
-                    if match:
-                        content = match.group(1)
-
-                        # Format the content (replace escaped newlines)
-                        content = self._client.format_text(content)
-
-                        # Update token counts
-                        completion_tokens += 1
-                        total_tokens = prompt_tokens + completion_tokens
-
-                        # Create the delta object
-                        delta = ChoiceDelta(
-                            content=content,
-                            role="assistant",
-                            tool_calls=None
-                        )
-
-                        # Create the choice object
-                        choice = Choice(
-                            index=0,
-                            delta=delta,
-                            finish_reason=None,
-                            logprobs=None
-                        )
-
-                        # Create the chunk object
-                        chunk = ChatCompletionChunk(
-                            id=request_id,
-                            choices=[choice],
-                            created=created_time,
-                            model=model,
-                            system_fingerprint=None
-                        )
-
-                        # Convert to dict for proper formatting
-                        chunk_dict = chunk.to_dict()
-
-                        # Add usage information to match OpenAI format
-                        usage_dict = {
-                            "prompt_tokens": prompt_tokens,
-                            "completion_tokens": completion_tokens,
-                            "total_tokens": total_tokens,
-                            "estimated_cost": None
-                        }
-
-                        chunk_dict["usage"] = usage_dict
-
-                        # Return the chunk object for internal processing
-                        yield chunk
-
-            # Final chunk with finish_reason="stop"
-            delta = ChoiceDelta(
-                content=None,
-                role=None,
-                tool_calls=None
-            )
-
-            choice = Choice(
-                index=0,
-                delta=delta,
-                finish_reason="stop",
-                logprobs=None
-            )
-
-            chunk = ChatCompletionChunk(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-                system_fingerprint=None
-            )
-
-            chunk_dict = chunk.to_dict()
-            chunk_dict["usage"] = {
-                "prompt_tokens": prompt_tokens,
-                "completion_tokens": completion_tokens,
-                "total_tokens": total_tokens,
-                "estimated_cost": None
-            }
-
-            yield chunk
-
-        except Exception as e:
-            print(f"Error during X0GPT stream request: {e}")
-            raise IOError(f"X0GPT request failed: {e}") from e
-
-    def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> ChatCompletion:
-        try:
-            # For non-streaming, we still use streaming internally to collect the full response
-            response = self._client.session.post(
-                self._client.api_endpoint,
-                headers=self._client.headers,
-                json=payload,
-                stream=True,
-                timeout=self._client.timeout
-            )
-
-            # Handle non-200 responses
-            if not response.ok:
-                raise IOError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            # Collect the full response
-            full_text = ""
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    match = re.search(r'0:"(.*?)"', line)
-                    if match:
-                        content = match.group(1)
-                        full_text += content
-
-            # Format the text (replace escaped newlines)
-            full_text = self._client.format_text(full_text)
-
-            # Estimate token counts
-            prompt_tokens = 0
-            for msg in payload.get("messages", []):
-                prompt_tokens += len(msg.get("content", "").split())
-
-            completion_tokens = len(full_text.split())
-            total_tokens = prompt_tokens + completion_tokens
-
-            # Create the message object
-            message = ChatCompletionMessage(
-                role="assistant",
-                content=full_text
-            )
-
-            # Create the choice object
-            choice = Choice(
-                index=0,
-                message=message,
-                finish_reason="stop"
-            )
-
-            # Create the usage object
-            usage = CompletionUsage(
-                prompt_tokens=prompt_tokens,
-                completion_tokens=completion_tokens,
-                total_tokens=total_tokens
-            )
-
-            # Create the completion object
-            completion = ChatCompletion(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-                usage=usage,
-            )
-
-            return completion
-
-        except Exception as e:
-            print(f"Error during X0GPT non-stream request: {e}")
-            raise IOError(f"X0GPT request failed: {e}") from e
-
-class Chat(BaseChat):
-    def __init__(self, client: 'X0GPT'):
-        self.completions = Completions(client)
-
-class X0GPT(OpenAICompatibleProvider):
-    """
-    OpenAI-compatible client for X0GPT API.
-
-    Usage:
-        client = X0GPT()
-        response = client.chat.completions.create(
-            model="gpt-4",
-            messages=[{"role": "user", "content": "Hello!"}]
-        )
-    """
-
-    AVAILABLE_MODELS = ["gpt-4", "gpt-3.5-turbo"]
-
-    def __init__(
-        self,
-        timeout: Optional[int] = None,
-        browser: str = "chrome"
-    ):
-        """
-        Initialize the X0GPT client.
-
-        Args:
-            timeout: Request timeout in seconds (None for no timeout)
-            browser: Browser to emulate in user agent
-        """
-        self.timeout = timeout
-        self.api_endpoint = "https://x0-gpt.devwtf.in/api/stream/reply"
-        self.session = requests.Session()
-
-        # Initialize LitAgent for user agent generation
-        agent = LitAgent()
-        self.fingerprint = agent.generate_fingerprint(browser)
-
-        self.headers = {
-            "authority": "x0-gpt.devwtf.in",
-            "method": "POST",
-            "path": "/api/stream/reply",
-            "scheme": "https",
-            "accept": self.fingerprint["accept"],
-            "accept-encoding": "gzip, deflate, br, zstd",
-            "accept-language": self.fingerprint["accept_language"],
-            "content-type": "application/json",
-            "dnt": "1",
-            "origin": "https://x0-gpt.devwtf.in",
-            "priority": "u=1, i",
-            "referer": "https://x0-gpt.devwtf.in/chat",
-            "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
-            "user-agent": self.fingerprint["user_agent"]
-        }
-
-        self.session.headers.update(self.headers)
-
-        # Initialize the chat interface
-        self.chat = Chat(self)
-
-    def format_text(self, text: str) -> str:
-        """
-        Format text by replacing escaped newlines with actual newlines.
-
-        Args:
-            text: Text to format
-
-        Returns:
-            Formatted text
-        """
-        # Use a more comprehensive approach to handle all escape sequences
-        try:
-            # First handle double backslashes to avoid issues
-            text = text.replace('\\\\', '\\')
-
-            # Handle common escape sequences
-            text = text.replace('\\n', '\n')
-            text = text.replace('\\r', '\r')
-            text = text.replace('\\t', '\t')
-            text = text.replace('\\"', '"')
-            text = text.replace("\\'", "'")
-
-            # Handle any remaining escape sequences using JSON decoding
-            # This is a fallback in case there are other escape sequences
-            try:
-                # Add quotes to make it a valid JSON string
-                json_str = f'"{text}"'
-                # Use json module to decode all escape sequences
-                decoded = json.loads(json_str)
-                return decoded
-            except json.JSONDecodeError:
-                # If JSON decoding fails, return the text with the replacements we've already done
-                return text
-        except Exception as e:
-            # If any error occurs, return the original text
-            print(f"Warning: Error formatting text: {e}")
-            return text
-
-    def convert_model_name(self, model: str) -> str:
-        """
-        Convert model names to ones supported by X0GPT.
-
-        Args:
-            model: Model name to convert
-
-        Returns:
-            X0GPT model name
-        """
-        # X0GPT doesn't actually use model names, but we'll keep this for compatibility
-        return model
webscout/Provider/OPENAI/yep.py
DELETED
@@ -1,327 +0,0 @@
-import time
-import uuid
-import cloudscraper # Import cloudscraper
-import json
-from typing import List, Dict, Optional, Union, Generator, Any
-
-# Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
-    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage, get_system_prompt # Import get_system_prompt
-)
-
-# Attempt to import LitAgent, fallback if not available
-try:
-    from webscout.litagent import LitAgent
-except ImportError:
-    # Define a dummy LitAgent if webscout is not installed or accessible
-    class LitAgent:
-        def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
-            print("Warning: LitAgent not found. Using default minimal headers.")
-            return {
-                "accept": "*/*",
-                "accept_language": "en-US,en;q=0.9",
-                "platform": "Windows",
-                "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
-                "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
-                "browser_type": browser,
-            }
-
-# --- YEPCHAT Client ---
-
-# ANSI escape codes for formatting
-BOLD = "\033[1m"
-RED = "\033[91m"
-RESET = "\033[0m"
-
-class Completions(BaseCompletions):
-    def __init__(self, client: 'YEPCHAT'):
-        self._client = client
-
-    def create(
-        self,
-        *,
-        model: str,
-        messages: List[Dict[str, str]],
-        max_tokens: Optional[int] = 1280,
-        stream: bool = False,
-        temperature: Optional[float] = 0.6,
-        top_p: Optional[float] = 0.7,
-        system_prompt: Optional[str] = None, # Added for consistency, but will be ignored
-        **kwargs: Any
-    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-        """
-        Creates a model response for the given chat conversation using YEPCHAT API.
-        Mimics openai.chat.completions.create
-        Note: YEPCHAT does not support system messages. They will be ignored.
-        """
-        if model not in self._client.AVAILABLE_MODELS:
-            raise ValueError(
-                f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}"
-            )
-
-        # Filter out system messages and warn the user if any are present
-        filtered_messages = []
-        has_system_message = False
-        if get_system_prompt(messages) or system_prompt: # Check both message list and explicit param
-            has_system_message = True
-
-        for msg in messages:
-            if msg["role"] == "system":
-                continue # Skip system messages
-            filtered_messages.append(msg)
-
-        if has_system_message:
-            # Print warning in bold red
-            print(f"{BOLD}{RED}Warning: YEPCHAT does not support system messages, they will be ignored.{RESET}")
-
-        # If no messages left after filtering, raise an error
-        if not filtered_messages:
-            raise ValueError("At least one user or assistant message is required for YEPCHAT.")
-
-        payload = {
-            "stream": stream,
-            "max_tokens": max_tokens,
-            "top_p": top_p,
-            "temperature": temperature,
-            "messages": filtered_messages, # Use filtered messages
-            "model": model,
-        }
-
-        # Add any extra kwargs to the payload
-        payload.update(kwargs)
-
-        request_id = f"chatcmpl-{uuid.uuid4()}"
-        created_time = int(time.time())
-
-        if stream:
-            return self._create_stream(request_id, created_time, model, payload)
-        else:
-            return self._create_non_stream(request_id, created_time, model, payload)
-
-    def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> Generator[ChatCompletionChunk, None, None]:
-        try:
-            # Use session.post from cloudscraper instance
-            response = self._client.session.post(
-                self._client.api_endpoint,
-                headers=self._client.headers,
-                cookies=self._client.cookies,
-                json=payload,
-                stream=True,
-                timeout=self._client.timeout
-            )
-
-            if not response.ok:
-                # Simplified error handling for now, add refresh logic if needed
-                raise IOError(
-                    f"YEPCHAT API Error: {response.status_code} {response.reason} - {response.text}"
-                )
-
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    line = line.strip()
-                    if line.startswith("data: "):
-                        json_str = line[6:]
-                        if json_str == "[DONE]":
-                            break
-                        try:
-                            data = json.loads(json_str)
-                            choice_data = data.get('choices', [{}])[0]
-                            delta_data = choice_data.get('delta', {})
-                            finish_reason = choice_data.get('finish_reason')
-                            content = delta_data.get('content')
-
-                            if content is not None: # Only yield chunks with content
-                                delta = ChoiceDelta(content=content, role=delta_data.get('role', 'assistant'))
-                                choice = Choice(index=0, delta=delta, finish_reason=finish_reason)
-                                chunk = ChatCompletionChunk(
-                                    id=request_id,
-                                    choices=[choice],
-                                    created=created_time,
-                                    model=model,
-                                )
-                                yield chunk
-
-                        except json.JSONDecodeError:
-                            print(f"Warning: Could not decode JSON line: {json_str}")
-                            continue
-
-            # Yield final chunk with finish reason if not already sent
-            delta = ChoiceDelta()
-            choice = Choice(index=0, delta=delta, finish_reason="stop") # Assume stop if loop finishes
-            chunk = ChatCompletionChunk(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-            )
-            yield chunk
-
-        except cloudscraper.exceptions.CloudflareChallengeError as e:
-            pass
-
-    def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-    ) -> ChatCompletion:
-        full_response_content = ""
-        finish_reason = "stop" # Assume stop unless error occurs
-
-        try:
-            stream_generator = self._create_stream(request_id, created_time, model, payload)
-            for chunk in stream_generator:
-                if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
-                    full_response_content += chunk.choices[0].delta.content
-                if chunk.choices and chunk.choices[0].finish_reason:
-                    finish_reason = chunk.choices[0].finish_reason # Capture finish reason if provided
-
-        except IOError as e:
-            print(f"Error obtaining non-stream response from YEPCHAT: {e}")
-            finish_reason = "error"
-
-        # Construct the final ChatCompletion object
-        message = ChatCompletionMessage(
-            role="assistant",
-            content=full_response_content
-        )
-        choice = Choice(
-            index=0,
-            message=message,
-            finish_reason=finish_reason
-        )
-        # Usage data is not provided by this API in a standard way, set to 0
-        usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
-
-        completion = ChatCompletion(
-            id=request_id,
-            choices=[choice],
-            created=created_time,
-            model=model,
-            usage=usage,
-        )
-        return completion
-
-class Chat(BaseChat):
-    def __init__(self, client: 'YEPCHAT'):
-        self.completions = Completions(client)
-
-class YEPCHAT(OpenAICompatibleProvider):
-    """
-    OpenAI-compatible client for YEPCHAT API.
-
-    Usage:
-        client = YEPCHAT()
-        response = client.chat.completions.create(
-            model="DeepSeek-R1-Distill-Qwen-32B",
-            messages=[{"role": "user", "content": "Hello!"}]
-        )
-        print(response.choices[0].message.content)
-    """
-    AVAILABLE_MODELS = ["DeepSeek-R1-Distill-Qwen-32B", "Mixtral-8x7B-Instruct-v0.1"]
-
-    def __init__(
-        self,
-        timeout: int = 30,
-        browser: str = "chrome"
-    ):
-        """
-        Initialize the YEPCHAT client.
-
-        Args:
-            timeout: Request timeout in seconds.
-            browser: Browser name for LitAgent to generate User-Agent.
-        """
-        self.timeout = timeout
-        self.api_endpoint = "https://api.yep.com/v1/chat/completions"
-        self.session = cloudscraper.create_scraper() # Use cloudscraper
-
-        # Initialize LitAgent for user agent generation and fingerprinting
-        try:
-            agent = LitAgent()
-            fingerprint = agent.generate_fingerprint(browser=browser)
-        except Exception as e:
-            print(f"Warning: Failed to generate fingerprint with LitAgent: {e}. Using fallback.")
-            # Fallback fingerprint data
-            fingerprint = {
-                "accept": "*/*",
-                "accept_language": "en-US,en;q=0.9",
-                "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
-                "platform": "Windows",
-                "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
-            }
-
-        # Initialize headers using the fingerprint
-        self.headers = {
-            "Accept": fingerprint["accept"],
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": fingerprint["accept_language"],
-            "Content-Type": "application/json; charset=utf-8",
-            "DNT": "1",
-            "Origin": "https://yep.com",
-            "Referer": "https://yep.com/",
-            "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-            "Sec-CH-UA-Mobile": "?0",
-            "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
-            "User-Agent": fingerprint["user_agent"],
-        }
-        self.session.headers.update(self.headers)
-
-        # Generate cookies (consider if these need refreshing or specific values)
-        self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
-
-        # Initialize the chat interface
-        self.chat = Chat(self)
-
-    def convert_model_name(self, model: str) -> str:
-        """
-        Ensures the model name is valid for YEPCHAT.
-        Returns the validated model name or raises an error if invalid.
-        """
-        if model in self.AVAILABLE_MODELS:
-            return model
-        else:
-            # Raise error instead of defaulting, as model is mandatory in create()
-            raise ValueError(f"Model '{model}' not supported by YEPCHAT. Available: {self.AVAILABLE_MODELS}")
-
-# Example usage (optional, for testing)
-if __name__ == '__main__':
-    print("Testing YEPCHAT OpenAI-Compatible Client...")
-
-    # Test Non-Streaming
-    try:
-        print("\n--- Non-Streaming Test (DeepSeek) ---")
-        client = YEPCHAT()
-        response = client.chat.completions.create(
-            model="DeepSeek-R1-Distill-Qwen-32B",
-            messages=[
-                {"role": "user", "content": "Say 'Hello World'"}
-            ],
-            stream=False
-        )
-        print("Response:", response.choices[0].message.content)
-        print("Usage:", response.usage) # Will show 0 tokens
-    except Exception as e:
-        print(f"Non-Streaming Test Failed: {e}")
-
-    # Test Streaming
-    try:
-        print("\n--- Streaming Test (Mixtral) ---")
-        client_stream = YEPCHAT()
-        stream = client_stream.chat.completions.create(
-            model="Mixtral-8x7B-Instruct-v0.1",
-            messages=[
-                {"role": "user", "content": "Write a short sentence about AI."}
-            ],
-            stream=True
-        )
-        print("Streaming Response:")
-        for chunk in stream:
-            content = chunk.choices[0].delta.content
-            if content:
-                print(content, end="", flush=True)
-        print() # Add a newline at the end
-
-    except Exception as e:
-        print(f"Streaming Test Failed: {e}")