webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/yep.py
@@ -1,356 +1,384 @@
- import time
- import uuid
- import cloudscraper # Import cloudscraper
- import json
- from typing import List, Dict, Optional, Union, Generator, Any
-
- # Import base classes and utility structures
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from .utils import (
-     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-     ChatCompletionMessage, CompletionUsage, get_system_prompt # Import get_system_prompt
- )
-
- # Attempt to import LitAgent, fallback if not available
- try:
-     from webscout.litagent import LitAgent
- except ImportError:
-     # Define a dummy LitAgent if webscout is not installed or accessible
-     class LitAgent:
-         def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
-             print("Warning: LitAgent not found. Using default minimal headers.")
-             return {
-                 "accept": "*/*",
-                 "accept_language": "en-US,en;q=0.9",
-                 "platform": "Windows",
-                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
-                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
-                 "browser_type": browser,
-             }
-
- # --- YEPCHAT Client ---
-
- # ANSI escape codes for formatting
- BOLD = "\033[1m"
- RED = "\033[91m"
- RESET = "\033[0m"
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'YEPCHAT'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = 1280,
-         stream: bool = False,
-         temperature: Optional[float] = 0.6,
-         top_p: Optional[float] = 0.7,
-         system_prompt: Optional[str] = None, # Added for consistency, but will be ignored
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation using YEPCHAT API.
-         Mimics openai.chat.completions.create
-         Note: YEPCHAT does not support system messages. They will be ignored.
-         """
-         # Accept both raw and prefixed model names from the user, but always send the raw name to the API
-         if model.startswith("YEPCHAT/"):
-             model_raw = model.replace("YEPCHAT/", "", 1)
-         else:
-             model_raw = model
-         # Validate model
-         if f"YEPCHAT/{model_raw}" not in self._client.AVAILABLE_MODELS:
-             raise ValueError(
-                 f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}"
-             )
-
-         # Filter out system messages and warn the user if any are present
-         filtered_messages = []
-         has_system_message = False
-         if get_system_prompt(messages) or system_prompt: # Check both message list and explicit param
-             has_system_message = True
-
-         for msg in messages:
-             if msg["role"] == "system":
-                 continue # Skip system messages
-             filtered_messages.append(msg)
-
-         if has_system_message:
-             # Print warning in bold red
-             print(f"{BOLD}{RED}Warning: YEPCHAT does not support system messages, they will be ignored.{RESET}")
-
-         # If no messages left after filtering, raise an error
-         if not filtered_messages:
-             raise ValueError("At least one user or assistant message is required for YEPCHAT.")
-
-         payload = {
-             "stream": stream,
-             "max_tokens": max_tokens,
-             "top_p": top_p,
-             "temperature": temperature,
-             "messages": filtered_messages, # Use filtered messages
-             "model": model_raw, # Send only the raw model name to the API
-         }
-
-         # Add any extra kwargs to the payload
-         payload.update(kwargs)
-
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-
-         if stream:
-             return self._create_stream(request_id, created_time, model, payload)
-         else:
-             return self._create_non_stream(request_id, created_time, model, payload)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         try:
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 headers=self._client.headers,
-                 cookies=self._client.cookies,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-
-             if not response.ok:
-                 raise IOError(
-                     f"YEPCHAT API Error: {response.status_code} {response.reason} - {response.text}"
-                 )
-
-             for line in response.iter_lines(decode_unicode=True):
-                 if line:
-                     line = line.strip()
-                     if line.startswith("data: "):
-                         json_str = line[6:]
-                         if json_str == "[DONE]":
-                             break
-                         try:
-                             data = json.loads(json_str)
-                             choice_data = data.get('choices', [{}])[0]
-                             delta_data = choice_data.get('delta', {})
-                             finish_reason = choice_data.get('finish_reason')
-                             content = delta_data.get('content')
-                             role = delta_data.get('role', None)
-
-                             if content is not None or role is not None:
-                                 delta = ChoiceDelta(content=content, role=role)
-                                 choice = Choice(index=0, delta=delta, finish_reason=finish_reason)
-                                 chunk = ChatCompletionChunk(
-                                     id=request_id,
-                                     choices=[choice],
-                                     created=created_time,
-                                     model=model,
-                                 )
-                                 yield chunk
-
-                         except json.JSONDecodeError:
-                             print(f"Warning: Could not decode JSON line: {json_str}")
-                             continue
-
-             # Yield final chunk with finish reason if not already sent
-             delta = ChoiceDelta()
-             choice = Choice(index=0, delta=delta, finish_reason="stop")
-             chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-             )
-             yield chunk
-
-         except cloudscraper.exceptions.CloudflareChallengeError as e:
-             pass
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-     ) -> ChatCompletion:
-         full_response_content = ""
-         finish_reason = "stop"
-         try:
-             # Make a non-streaming request to the API
-             payload_copy = payload.copy()
-             payload_copy["stream"] = False
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 headers=self._client.headers,
-                 cookies=self._client.cookies,
-                 json=payload_copy,
-                 timeout=self._client.timeout
-             )
-             if not response.ok:
-                 raise IOError(
-                     f"YEPCHAT API Error: {response.status_code} {response.reason} - {response.text}"
-                 )
-             data = response.json()
-             if 'choices' in data and len(data['choices']) > 0:
-                 # YEPCHAT non-streaming returns message content in choices[0]['message']['content']
-                 full_response_content = data['choices'][0].get('message', {}).get('content', '')
-                 finish_reason = data['choices'][0].get('finish_reason', 'stop')
-             else:
-                 full_response_content = ''
-                 finish_reason = 'stop'
-         except Exception as e:
-             print(f"Error obtaining non-stream response from YEPCHAT: {e}")
-             finish_reason = "error"
-
-         message = ChatCompletionMessage(
-             role="assistant",
-             content=full_response_content
-         )
-         choice = Choice(
-             index=0,
-             message=message,
-             finish_reason=finish_reason
-         )
-         usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
-         completion = ChatCompletion(
-             id=request_id,
-             choices=[choice],
-             created=created_time,
-             model=model,
-             usage=usage,
-         )
-         return completion
-
- class Chat(BaseChat):
-     def __init__(self, client: 'YEPCHAT'):
-         self.completions = Completions(client)
-
- class YEPCHAT(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for YEPCHAT API.
-
-     Usage:
-         client = YEPCHAT()
-         response = client.chat.completions.create(
-             model="DeepSeek-R1-Distill-Qwen-32B",
-             messages=[{"role": "user", "content": "Hello!"}]
-         )
-         print(response.choices[0].message.content)
-     """
-     _base_models = ["DeepSeek-R1-Distill-Qwen-32B", "Mixtral-8x7B-Instruct-v0.1"]
-
-     # Create AVAILABLE_MODELS as a list with the format "YEPCHAT/model"
-     AVAILABLE_MODELS = [f"YEPCHAT/{model}" for model in _base_models]
-
-     # Create a mapping dictionary for internal use
-     _model_mapping = {model: f"YEPCHAT/{model}" for model in _base_models}
-
-     def __init__(
-         self,
-         timeout: int = 30,
-         browser: str = "chrome"
-     ):
-         """
-         Initialize the YEPCHAT client.
-
-         Args:
-             timeout: Request timeout in seconds.
-             browser: Browser name for LitAgent to generate User-Agent.
-         """
-         self.timeout = timeout
-         self.api_endpoint = "https://api.yep.com/v1/chat/completions"
-         self.session = cloudscraper.create_scraper() # Use cloudscraper
-
-         # Initialize LitAgent for user agent generation and fingerprinting
-         try:
-             agent = LitAgent()
-             fingerprint = agent.generate_fingerprint(browser=browser)
-         except Exception as e:
-             print(f"Warning: Failed to generate fingerprint with LitAgent: {e}. Using fallback.")
-             # Fallback fingerprint data
-             fingerprint = {
-                 "accept": "*/*",
-                 "accept_language": "en-US,en;q=0.9",
-                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
-                 "platform": "Windows",
-                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
-             }
-
-         # Initialize headers using the fingerprint
-         self.headers = {
-             "Accept": fingerprint["accept"],
-             "Accept-Encoding": "gzip, deflate, br, zstd",
-             "Accept-Language": fingerprint["accept_language"],
-             "Content-Type": "application/json; charset=utf-8",
-             "DNT": "1",
-             "Origin": "https://yep.com",
-             "Referer": "https://yep.com/",
-             "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-             "Sec-CH-UA-Mobile": "?0",
-             "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
-             "User-Agent": fingerprint["user_agent"],
-         }
-         self.session.headers.update(self.headers)
-
-         # Generate cookies (consider if these need refreshing or specific values)
-         self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
-
-         # Initialize the chat interface
-         self.chat = Chat(self)
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return YEPCHAT.AVAILABLE_MODELS
-         return _ModelList()
-
-     def convert_model_name(self, model: str) -> str:
-         """
-         Ensures the model name is valid for YEPCHAT.
-         Returns the validated model name or raises an error if invalid.
-         """
-         if model in self.AVAILABLE_MODELS:
-             return model
-         else:
-             # Raise error instead of defaulting, as model is mandatory in create()
-             raise ValueError(f"Model '{model}' not supported by YEPCHAT. Available: {self.AVAILABLE_MODELS}")
-
- # Example usage (optional, for testing)
- if __name__ == '__main__':
-     print("Testing YEPCHAT OpenAI-Compatible Client...")
-
-     # Test Non-Streaming
-     try:
-         print("\n--- Non-Streaming Test (DeepSeek) ---")
-         client = YEPCHAT()
-         response = client.chat.completions.create(
-             model="DeepSeek-R1-Distill-Qwen-32B",
-             messages=[
-                 {"role": "user", "content": "Say 'Hello World'"}
-             ],
-             stream=False
-         )
-         print("Response:", response.choices[0].message.content)
-         print("Usage:", response.usage) # Will show 0 tokens
-     except Exception as e:
-         print(f"Non-Streaming Test Failed: {e}")
-
-     # Test Streaming
-     try:
-         print("\n--- Streaming Test (Mixtral) ---")
-         client_stream = YEPCHAT()
-         stream = client_stream.chat.completions.create(
-             model="Mixtral-8x7B-Instruct-v0.1",
-             messages=[
-                 {"role": "user", "content": "Write a short sentence about AI."}
-             ],
-             stream=True
-         )
-         print("Streaming Response:")
-         for chunk in stream:
-             content = chunk.choices[0].delta.content
-             if content:
-                 print(content, end="", flush=True)
-         print() # Add a newline at the end
-
-     except Exception as e:
-         print(f"Streaming Test Failed: {e}")
+ import time
+ import uuid
+ import cloudscraper # Import cloudscraper
+ import json
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ # Import base classes and utility structures
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from .utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage, get_system_prompt, count_tokens # Import count_tokens
+ )
+
+ # Attempt to import LitAgent, fallback if not available
+ try:
+     from webscout.litagent import LitAgent
+ except ImportError:
+     # Define a dummy LitAgent if webscout is not installed or accessible
+     class LitAgent:
+         def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
+             print("Warning: LitAgent not found. Using default minimal headers.")
+             return {
+                 "accept": "*/*",
+                 "accept_language": "en-US,en;q=0.9",
+                 "platform": "Windows",
+                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+                 "browser_type": browser,
+             }
+
+ # --- YEPCHAT Client ---
+
+ # ANSI escape codes for formatting
+ BOLD = "\033[1m"
+ RED = "\033[91m"
+ RESET = "\033[0m"
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'YEPCHAT'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = 1280,
+         stream: bool = False,
+         temperature: Optional[float] = 0.6,
+         top_p: Optional[float] = 0.7,
+         system_prompt: Optional[str] = None, # Added for consistency, but will be ignored
+         timeout: Optional[int] = None,
+         proxies: Optional[Dict[str, str]] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation using YEPCHAT API.
+         Mimics openai.chat.completions.create
+         Note: YEPCHAT does not support system messages. They will be ignored.
+         """
+         # Only accept and use the raw model name (no prefix logic)
+         model_raw = model
+         # Validate model
+         if model_raw not in self._client.AVAILABLE_MODELS:
+             raise ValueError(
+                 f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}"
+             )
+
+         # Filter out system messages and warn the user if any are present
+         filtered_messages = []
+         has_system_message = False
+         if get_system_prompt(messages) or system_prompt: # Check both message list and explicit param
+             has_system_message = True
+
+         for msg in messages:
+             if msg["role"] == "system":
+                 continue # Skip system messages
+             filtered_messages.append(msg)
+
+         if has_system_message:
+             # Print warning in bold red
+             print(f"{BOLD}{RED}Warning: YEPCHAT does not support system messages, they will be ignored.{RESET}")
+
+         # If no messages left after filtering, raise an error
+         if not filtered_messages:
+             raise ValueError("At least one user or assistant message is required for YEPCHAT.")
+
+         payload = {
+             "stream": stream,
+             "max_tokens": max_tokens,
+             "top_p": top_p,
+             "temperature": temperature,
+             "messages": filtered_messages, # Use filtered messages
+             "model": model_raw, # Send only the raw model name to the API
+         }
+
+         # Add any extra kwargs to the payload
+         payload.update(kwargs)
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+         else:
+             return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=self._client.headers,
+                 cookies=self._client.cookies,
+                 json=payload,
+                 stream=True,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies or getattr(self._client, "proxies", None)
+             )
+
+             if not response.ok:
+                 raise IOError(
+                     f"YEPCHAT API Error: {response.status_code} {response.reason} - {response.text}"
+                 )
+
+             # Track tokens for streaming
+             prompt_tokens = count_tokens([m.get('content', '') for m in payload.get('messages', [])])
+             completion_tokens = 0
+             total_tokens = prompt_tokens
+
+             for line in response.iter_lines(decode_unicode=True):
+                 if line:
+                     line = line.strip()
+                     if line.startswith("data: "):
+                         json_str = line[6:]
+                         if json_str == "[DONE]":
+                             break
+                         try:
+                             data = json.loads(json_str)
+                             choice_data = data.get('choices', [{}])[0]
+                             delta_data = choice_data.get('delta', {})
+                             finish_reason = choice_data.get('finish_reason')
+                             content = delta_data.get('content')
+                             role = delta_data.get('role', None)
+
+                             # Count tokens for this chunk
+                             chunk_tokens = count_tokens(content) if content else 0
+                             completion_tokens += chunk_tokens
+                             total_tokens = prompt_tokens + completion_tokens
+
+                             if content is not None or role is not None:
+                                 delta = ChoiceDelta(content=content, role=role)
+                                 choice = Choice(index=0, delta=delta, finish_reason=finish_reason)
+                                 chunk = ChatCompletionChunk(
+                                     id=request_id,
+                                     choices=[choice],
+                                     created=created_time,
+                                     model=model,
+                                 )
+                                 # Set usage directly on the chunk object
+                                 chunk.usage = {
+                                     "prompt_tokens": prompt_tokens,
+                                     "completion_tokens": completion_tokens,
+                                     "total_tokens": total_tokens,
+                                     "estimated_cost": None
+                                 }
+                                 # Yield the chunk with usage information
+                                 yield chunk
+
+                         except json.JSONDecodeError:
+                             print(f"Warning: Could not decode JSON line: {json_str}")
+                             continue
+
+             # Yield final chunk with finish reason if not already sent
+             delta = ChoiceDelta()
+             choice = Choice(index=0, delta=delta, finish_reason="stop")
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+             )
+             # Set usage directly on the chunk object
+             chunk.usage = {
+                 "prompt_tokens": prompt_tokens,
+                 "completion_tokens": completion_tokens,
+                 "total_tokens": total_tokens,
+                 "estimated_cost": None
+             }
+             yield chunk
+
+         except cloudscraper.exceptions.CloudflareChallengeError as e:
+             pass
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+     ) -> ChatCompletion:
+         full_response_content = ""
+         finish_reason = "stop"
+         try:
+             # Make a non-streaming request to the API
+             payload_copy = payload.copy()
+             payload_copy["stream"] = False
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=self._client.headers,
+                 cookies=self._client.cookies,
+                 json=payload_copy,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies or getattr(self._client, "proxies", None)
+             )
+             if not response.ok:
+                 raise IOError(
+                     f"YEPCHAT API Error: {response.status_code} {response.reason} - {response.text}"
+                 )
+             data = response.json()
+             if 'choices' in data and len(data['choices']) > 0:
+                 # YEPCHAT non-streaming returns message content in choices[0]['message']['content']
+                 full_response_content = data['choices'][0].get('message', {}).get('content', '')
+                 finish_reason = data['choices'][0].get('finish_reason', 'stop')
+             else:
+                 full_response_content = ''
+                 finish_reason = 'stop'
+         except Exception as e:
+             print(f"Error obtaining non-stream response from YEPCHAT: {e}")
+             finish_reason = "error"
+
+         message = ChatCompletionMessage(
+             role="assistant",
+             content=full_response_content
+         )
+         choice = Choice(
+             index=0,
+             message=message,
+             finish_reason=finish_reason
+         )
+         # Use count_tokens to compute usage
+         prompt_tokens = count_tokens([m.get('content', '') for m in payload.get('messages', [])])
+         completion_tokens = count_tokens(full_response_content)
+         usage = CompletionUsage(
+             prompt_tokens=prompt_tokens,
+             completion_tokens=completion_tokens,
+             total_tokens=prompt_tokens + completion_tokens
+         )
+         completion = ChatCompletion(
+             id=request_id,
+             choices=[choice],
+             created=created_time,
+             model=model,
+             usage=usage,
+         )
+         return completion
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'YEPCHAT'):
+         self.completions = Completions(client)
+
+ class YEPCHAT(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for YEPCHAT API.
+
+     Usage:
+         client = YEPCHAT()
+         response = client.chat.completions.create(
+             model="DeepSeek-R1-Distill-Qwen-32B",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+     _base_models = ["DeepSeek-R1-Distill-Qwen-32B", "Mixtral-8x7B-Instruct-v0.1"]
+
+     # Create AVAILABLE_MODELS as a list of base model names (no prefix)
+     AVAILABLE_MODELS = _base_models
+
+     def __init__(
+         self,
+         browser: str = "chrome"
+     ):
+         """
+         Initialize the YEPCHAT client.
+
+         Args:
+             browser: Browser name for LitAgent to generate User-Agent.
+         """
+         self.timeout = None
+         self.api_endpoint = "https://api.yep.com/v1/chat/completions"
+         self.session = cloudscraper.create_scraper() # Use cloudscraper
+
+         # Initialize LitAgent for user agent generation and fingerprinting
+         try:
+             agent = LitAgent()
+             fingerprint = agent.generate_fingerprint(browser=browser)
+         except Exception as e:
+             print(f"Warning: Failed to generate fingerprint with LitAgent: {e}. Using fallback.")
+             # Fallback fingerprint data
+             fingerprint = {
+                 "accept": "*/*",
+                 "accept_language": "en-US,en;q=0.9",
+                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                 "platform": "Windows",
+                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+             }
+
+         # Initialize headers using the fingerprint
+         self.headers = {
+             "Accept": fingerprint["accept"],
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": fingerprint["accept_language"],
+             "Content-Type": "application/json; charset=utf-8",
+             "DNT": "1",
+             "Origin": "https://yep.com",
+             "Referer": "https://yep.com/",
+             "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             "Sec-CH-UA-Mobile": "?0",
+             "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
+             "User-Agent": fingerprint["user_agent"],
+         }
+         self.session.headers.update(self.headers)
+
+         # Generate cookies (consider if these need refreshing or specific values)
+         self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
+
+         # Initialize the chat interface
+         self.chat = Chat(self)
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return YEPCHAT.AVAILABLE_MODELS
+         return _ModelList()
+
+     def convert_model_name(self, model: str) -> str:
+         """
+         Ensures the model name is valid for YEPCHAT.
+         Returns the validated model name or raises an error if invalid.
+         """
+         if model in self.AVAILABLE_MODELS:
+             return model
+         else:
+             # Raise error instead of defaulting, as model is mandatory in create()
+             raise ValueError(f"Model '{model}' not supported by YEPCHAT. Available: {self.AVAILABLE_MODELS}")
+
+ # Example usage (optional, for testing)
+ if __name__ == '__main__':
+     print("Testing YEPCHAT OpenAI-Compatible Client...")
+
+     # Test Non-Streaming
+     try:
+         print("\n--- Non-Streaming Test (DeepSeek) ---")
+         client = YEPCHAT()
+         response = client.chat.completions.create(
+             model="DeepSeek-R1-Distill-Qwen-32B",
+             messages=[
+                 {"role": "user", "content": "Say 'Hello World'"}
+             ],
+             stream=False
+         )
+         print("Response:", response.choices[0].message.content)
+         print("Usage:", response.usage) # Will show 0 tokens
+     except Exception as e:
+         print(f"Non-Streaming Test Failed: {e}")
+
+     # Test Streaming
+     try:
+         print("\n--- Streaming Test (Mixtral) ---")
+         client_stream = YEPCHAT()
+         stream = client_stream.chat.completions.create(
+             model="Mixtral-8x7B-Instruct-v0.1",
+             messages=[
+                 {"role": "user", "content": "Write a short sentence about AI."}
+             ],
+             stream=True
+         )
+         print("Streaming Response:")
+         for chunk in stream:
+             content = chunk.choices[0].delta.content
+             if content:
+                 print(content, end="", flush=True)
+         print() # Add a newline at the end
+
+     except Exception as e:
+         print(f"Streaming Test Failed: {e}")