webscout-8.2.8-py3-none-any.whl → webscout-8.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
@@ -1,488 +1,513 @@
- import time
- import uuid
- # import cloudscraper
- from curl_cffi.requests import Session, RequestsError
- import json
- import re
- from typing import List, Dict, Optional, Union, Generator, Any
-
- # Import base classes and utility structures
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from .utils import (
-     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-     ChatCompletionMessage, CompletionUsage
- )
-
- # Attempt to import LitAgent, fallback if not available
- try:
-     from webscout.litagent import LitAgent
- except ImportError:
-     # Define a dummy LitAgent if webscout is not installed or accessible
-     class LitAgent:
-         def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
-             # Return minimal default headers if LitAgent is unavailable
-             print("Warning: LitAgent not found. Using default minimal headers.")
-             return {
-                 "accept": "*/*",
-                 "accept_language": "en-US,en;q=0.9",
-                 "platform": "Windows",
-                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
-                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
-                 "browser_type": browser,
-             }
-
- # --- ChatGPTClone Client ---
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'ChatGPTClone'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = 2049,
-         stream: bool = False,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation.
-         Mimics openai.chat.completions.create
-         """
-         # Prepare the payload for ChatGPTClone API
-         payload = {
-             "messages": messages,
-             "model": self._client.convert_model_name(model),
-         }
-
-         # Add optional parameters if provided
-         if max_tokens is not None and max_tokens > 0:
-             payload["max_tokens"] = max_tokens
-
-         if temperature is not None:
-             payload["temperature"] = temperature
-
-         if top_p is not None:
-             payload["top_p"] = top_p
-
-         # Add any additional parameters
-         payload.update(kwargs)
-
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-
-         if stream:
-             return self._create_stream(request_id, created_time, model, payload)
-         else:
-             return self._create_non_stream(request_id, created_time, model, payload)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         try:
-             response = self._client.session.post(
-                 f"{self._client.url}/api/chat",
-                 headers=self._client.headers,
-                 cookies=self._client.cookies,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-
-             # Handle non-200 responses
-             if not response.ok:
-                 # If we get a non-200 response, try refreshing our identity once
-                 if response.status_code in [403, 429]:
-                     self._client.refresh_identity()
-                     # Retry with new identity
-                     response = self._client.session.post(
-                         f"{self._client.url}/api/chat",
-                         headers=self._client.headers,
-                         cookies=self._client.cookies,
-                         json=payload,
-                         stream=True,
-                         timeout=self._client.timeout
-                     )
-                     if not response.ok:
-                         raise IOError(
-                             f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
-                         )
-                 else:
-                     raise IOError(
-                         f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                     )
-
-             # Track token usage across chunks
-             prompt_tokens = 0
-             completion_tokens = 0
-             total_tokens = 0
-
-             # Estimate prompt tokens based on message length
-             for msg in payload.get("messages", []):
-                 prompt_tokens += len(msg.get("content", "").split())
-
-             buffer = ""
-             for line in response.iter_content():
-                 if line:
-                     if isinstance(line, bytes):
-                         line = line.decode("utf-8", errors="replace")
-                     buffer += line
-
-                     # ChatGPTClone uses a different format, so we need to extract the content
-                     match = re.search(r'0:"(.*?)"', buffer)
-                     if match:
-                         content = match.group(1)
-
-                         # Format the content (replace escaped newlines)
-                         content = self._client.format_text(content)
-
-                         # Update token counts
-                         completion_tokens += 1
-                         total_tokens = prompt_tokens + completion_tokens
-
-                         # Create the delta object
-                         delta = ChoiceDelta(
-                             content=content,
-                             role="assistant",
-                             tool_calls=None
-                         )
-
-                         # Create the choice object
-                         choice = Choice(
-                             index=0,
-                             delta=delta,
-                             finish_reason=None,
-                             logprobs=None
-                         )
-
-                         # Create the chunk object
-                         chunk = ChatCompletionChunk(
-                             id=request_id,
-                             choices=[choice],
-                             created=created_time,
-                             model=model,
-                             system_fingerprint=None
-                         )
-
-                         # Convert to dict for proper formatting
-                         chunk_dict = chunk.to_dict()
-
-                         # Add usage information to match OpenAI format
-                         usage_dict = {
-                             "prompt_tokens": prompt_tokens,
-                             "completion_tokens": completion_tokens,
-                             "total_tokens": total_tokens,
-                             "estimated_cost": None
-                         }
-
-                         chunk_dict["usage"] = usage_dict
-
-                         # Return the chunk object for internal processing
-                         yield chunk
-
-                         # Clear buffer after processing
-                         buffer = ""
-                     # If buffer gets too long, reset it to avoid memory issues
-                     elif len(buffer) > 1024:
-                         buffer = ""
-
-             # Final chunk with finish_reason="stop"
-             delta = ChoiceDelta(
-                 content=None,
-                 role=None,
-                 tool_calls=None
-             )
-
-             choice = Choice(
-                 index=0,
-                 delta=delta,
-                 finish_reason="stop",
-                 logprobs=None
-             )
-
-             chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 system_fingerprint=None
-             )
-
-             chunk_dict = chunk.to_dict()
-             chunk_dict["usage"] = {
-                 "prompt_tokens": prompt_tokens,
-                 "completion_tokens": completion_tokens,
-                 "total_tokens": total_tokens,
-                 "estimated_cost": None
-             }
-
-             yield chunk
-
-         except Exception as e:
-             print(f"Error during ChatGPTClone stream request: {e}")
-             raise IOError(f"ChatGPTClone request failed: {e}") from e
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
-     ) -> ChatCompletion:
-         try:
-             # For non-streaming, we still use streaming internally to collect the full response
-             response = self._client.session.post(
-                 f"{self._client.url}/api/chat",
-                 headers=self._client.headers,
-                 cookies=self._client.cookies,
-                 json=payload,
-                 stream=True,
-                 timeout=self._client.timeout
-             )
-
-             # Handle non-200 responses
-             if not response.ok:
-                 # If we get a non-200 response, try refreshing our identity once
-                 if response.status_code in [403, 429]:
-                     self._client.refresh_identity()
-                     # Retry with new identity
-                     response = self._client.session.post(
-                         f"{self._client.url}/api/chat",
-                         headers=self._client.headers,
-                         cookies=self._client.cookies,
-                         json=payload,
-                         stream=True,
-                         timeout=self._client.timeout
-                     )
-                     if not response.ok:
-                         raise IOError(
-                             f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
-                         )
-                 else:
-                     raise IOError(
-                         f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                     )
-
-             # Collect the full response
-             full_text = ""
-             buffer = ""
-             for line in response.iter_content():
-                 if line:
-                     if isinstance(line, bytes):
-                         line = line.decode("utf-8", errors="replace")
-                     buffer += line
-                     match = re.search(r'0:"(.*?)"', buffer)
-                     if match:
-                         content = match.group(1)
-                         full_text += content
-                         buffer = ""
-                     # If buffer gets too long, reset it to avoid memory issues
-                     elif len(buffer) > 1024:
-                         buffer = ""
-
-             # Format the text (replace escaped newlines)
-             full_text = self._client.format_text(full_text)
-
-             # Estimate token counts
-             prompt_tokens = 0
-             for msg in payload.get("messages", []):
-                 prompt_tokens += len(msg.get("content", "").split())
-
-             completion_tokens = len(full_text.split())
-             total_tokens = prompt_tokens + completion_tokens
-
-             # Create the message object
-             message = ChatCompletionMessage(
-                 role="assistant",
-                 content=full_text
-             )
-
-             # Create the choice object
-             choice = Choice(
-                 index=0,
-                 message=message,
-                 finish_reason="stop"
-             )
-
-             # Create the usage object
-             usage = CompletionUsage(
-                 prompt_tokens=prompt_tokens,
-                 completion_tokens=completion_tokens,
-                 total_tokens=total_tokens
-             )
-
-             # Create the completion object
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 usage=usage,
-             )
-
-             return completion
-
-         except Exception as e:
-             print(f"Error during ChatGPTClone non-stream request: {e}")
-             raise IOError(f"ChatGPTClone request failed: {e}") from e
-
- class Chat(BaseChat):
-     def __init__(self, client: 'ChatGPTClone'):
-         self.completions = Completions(client)
-
- class ChatGPTClone(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for ChatGPT Clone API.
-
-     Usage:
-         client = ChatGPTClone()
-         response = client.chat.completions.create(
-             model="gpt-4",
-             messages=[{"role": "user", "content": "Hello!"}]
-         )
-     """
-
-     url = "https://chatgpt-clone-ten-nu.vercel.app"
-     AVAILABLE_MODELS = ["gpt-4", "gpt-3.5-turbo"]
-
-     def __init__(
-         self,
-         timeout: Optional[int] = None,
-         browser: str = "chrome",
-         impersonate: str = "chrome120"
-     ):
-         """
-         Initialize the ChatGPTClone client.
-
-         Args:
-             timeout: Request timeout in seconds (None for no timeout)
-             browser: Browser to emulate in user agent (for LitAgent fallback)
-             impersonate: Browser impersonation for curl_cffi (default: chrome120)
-         """
-         self.timeout = timeout
-         self.temperature = 0.6 # Default temperature
-         self.top_p = 0.7 # Default top_p
-
-         # Use curl_cffi for Cloudflare bypass and browser impersonation
-         self.session = Session(impersonate=impersonate, timeout=timeout)
-
-         # Use LitAgent for fingerprint if available, else fallback
-         agent = LitAgent()
-         self.fingerprint = agent.generate_fingerprint(browser)
-
-         # Use the fingerprint for headers
-         self.headers = {
-             "Accept": self.fingerprint["accept"],
-             "Accept-Encoding": "gzip, deflate, br, zstd",
-             "Accept-Language": self.fingerprint["accept_language"],
-             "Content-Type": "application/json",
-             "DNT": "1",
-             "Origin": self.url,
-             "Referer": f"{self.url}/",
-             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-             "Sec-CH-UA-Mobile": "?0",
-             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-             "User-Agent": self.fingerprint["user_agent"],
-         }
-
-         # Create session cookies with unique identifiers
-         self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
-
-         # Set consistent headers for the scraper session
-         for header, value in self.headers.items():
-             self.session.headers[header] = value
-
-         # Initialize the chat interface
-         self.chat = Chat(self)
-
-     def refresh_identity(self, browser: str = None, impersonate: str = None):
-         """Refreshes the browser identity fingerprint and curl_cffi session."""
-         browser = browser or self.fingerprint.get("browser_type", "chrome")
-         impersonate = impersonate or "chrome120"
-         self.fingerprint = LitAgent().generate_fingerprint(browser)
-         self.session = Session(impersonate=impersonate, timeout=self.timeout)
-         # Update headers with new fingerprint
-         self.headers.update({
-             "Accept": self.fingerprint["accept"],
-             "Accept-Language": self.fingerprint["accept_language"],
-             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
-             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
-             "User-Agent": self.fingerprint["user_agent"],
-         })
-
-         # Update session headers
-         for header, value in self.headers.items():
-             self.session.headers[header] = value
-
-         # Generate new cookies
-         self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
-
-         return self.fingerprint
-
-     def format_text(self, text: str) -> str:
-         """
-         Format text by replacing escaped newlines with actual newlines.
-
-         Args:
-             text: Text to format
-
-         Returns:
-             Formatted text
-         """
-         # Use a more comprehensive approach to handle all escape sequences
-         try:
-             # First handle double backslashes to avoid issues
-             text = text.replace('\\\\', '\\')
-
-             # Handle common escape sequences
-             text = text.replace('\\n', '\n')
-             text = text.replace('\\r', '\r')
-             text = text.replace('\\t', '\t')
-             text = text.replace('\\"', '"')
-             text = text.replace("\\'\'", "'")
-
-             # Handle any remaining escape sequences using JSON decoding
-             # This is a fallback in case there are other escape sequences
-             try:
-                 # Add quotes to make it a valid JSON string
-                 json_str = f'"{text}"'
-                 # Use json module to decode all escape sequences
-                 decoded = json.loads(json_str)
-                 return decoded
-             except json.JSONDecodeError:
-                 # If JSON decoding fails, return the text with the replacements we've already done
-                 return text
-         except Exception as e:
-             # If any error occurs, return the original text
-             print(f"Warning: Error formatting text: {e}")
-             return text
-
-     def convert_model_name(self, model: str) -> str:
-         """
-         Convert model names to ones supported by ChatGPTClone.
-
-         Args:
-             model: Model name to convert
-
-         Returns:
-             ChatGPTClone model name
-         """
-         # If the model is already a valid ChatGPTClone model, return it
-         if model in self.AVAILABLE_MODELS:
-             return model
-
-         # Map similar models to supported ones
-         if model.startswith("gpt-4"):
-             return "gpt-4"
-         elif model.startswith("gpt-3.5"):
-             return "gpt-3.5-turbo"
-
-         # Default to the most capable model
-         print(f"Warning: Unknown model '{model}'. Using 'gpt-4' instead.")
-         return "gpt-4"
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
+ import time
+ import uuid
+ # import cloudscraper
+ from curl_cffi.requests import Session, RequestsError
+ import json
+ import re
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ # Import base classes and utility structures
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from .utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage, count_tokens
+ )
+
+ # Attempt to import LitAgent, fallback if not available
+ try:
+     from webscout.litagent import LitAgent
+ except ImportError:
+     # Define a dummy LitAgent if webscout is not installed or accessible
+     class LitAgent:
+         def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
+             # Return minimal default headers if LitAgent is unavailable
+             print("Warning: LitAgent not found. Using default minimal headers.")
+             return {
+                 "accept": "*/*",
+                 "accept_language": "en-US,en;q=0.9",
+                 "platform": "Windows",
+                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+                 "browser_type": browser,
+             }
+
+ # --- ChatGPTClone Client ---
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'ChatGPTClone'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = 2049,
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         timeout: Optional[int] = None,
+         proxies: Optional[dict] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         # Prepare the payload for ChatGPTClone API
+         payload = {
+             "messages": messages,
+             "model": self._client.convert_model_name(model),
+         }
+
+         # Add optional parameters if provided
+         if max_tokens is not None and max_tokens > 0:
+             payload["max_tokens"] = max_tokens
+
+         if temperature is not None:
+             payload["temperature"] = temperature
+
+         if top_p is not None:
+             payload["top_p"] = top_p
+
+         # Add any additional parameters
+         payload.update(kwargs)
+
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+
+         if stream:
+             return self._create_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)
+         else:
+             return self._create_non_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+         timeout: Optional[int] = None, proxies: Optional[dict] = None
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         original_proxies = self._client.session.proxies
+         if proxies is not None:
+             self._client.session.proxies = proxies
+         else:
+             self._client.session.proxies = {}
+         try:
+             timeout_val = timeout if timeout is not None else self._client.timeout
+             response = self._client.session.post(
+                 f"{self._client.url}/api/chat",
+                 headers=self._client.headers,
+                 cookies=self._client.cookies,
+                 json=payload,
+                 stream=True,
+                 timeout=timeout_val
+             )
+
+             # Handle non-200 responses
+             if not response.ok:
+                 # If we get a non-200 response, try refreshing our identity once
+                 if response.status_code in [403, 429]:
+                     self._client.refresh_identity()
+                     # Retry with new identity
+                     response = self._client.session.post(
+                         f"{self._client.url}/api/chat",
+                         headers=self._client.headers,
+                         cookies=self._client.cookies,
+                         json=payload,
+                         stream=True,
+                         timeout=timeout_val
+                     )
+                     if not response.ok:
+                         raise IOError(
+                             f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
+                         )
+                 else:
+                     raise IOError(
+                         f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                     )
+
+             # Track token usage across chunks
+             prompt_tokens = 0
+             completion_tokens = 0
+             total_tokens = 0
+
+             # Estimate prompt tokens based on message length
+             for msg in payload.get("messages", []):
+                 prompt_tokens += count_tokens(msg.get("content", ""))
+
+             buffer = ""
+             for line in response.iter_content():
+                 if line:
+                     if isinstance(line, bytes):
+                         line = line.decode("utf-8", errors="replace")
+                     buffer += line
+
+                     # ChatGPTClone uses a different format, so we need to extract the content
+                     match = re.search(r'0:"(.*?)"', buffer)
+                     if match:
+                         content = match.group(1)
+
+                         # Format the content (replace escaped newlines)
+                         content = self._client.format_text(content)
+
+                         # Update token counts using count_tokens
+                         completion_tokens += count_tokens(content)
+                         total_tokens = prompt_tokens + completion_tokens
+
+                         # Create the delta object
+                         delta = ChoiceDelta(
+                             content=content,
+                             role="assistant",
+                             tool_calls=None
+                         )
+
+                         # Create the choice object
+                         choice = Choice(
+                             index=0,
+                             delta=delta,
+                             finish_reason=None,
+                             logprobs=None
+                         )
+
+                         # Create the chunk object
+                         chunk = ChatCompletionChunk(
+                             id=request_id,
+                             choices=[choice],
+                             created=created_time,
+                             model=model,
+                             system_fingerprint=None
+                         )
+
+                         # Convert chunk to dict using Pydantic's API
+                         if hasattr(chunk, "model_dump"):
+                             chunk_dict = chunk.model_dump(exclude_none=True)
+                         else:
+                             chunk_dict = chunk.dict(exclude_none=True)
+
+                         # Add usage information to match OpenAI format
+                         usage_dict = {
+                             "prompt_tokens": prompt_tokens,
+                             "completion_tokens": completion_tokens,
+                             "total_tokens": total_tokens,
+                             "estimated_cost": None
+                         }
+
+                         chunk_dict["usage"] = usage_dict
+
+                         # Return the chunk object for internal processing
+                         yield chunk
+
+                         # Clear buffer after processing
+                         buffer = ""
+                     # If buffer gets too long, reset it to avoid memory issues
+                     elif len(buffer) > 1024:
+                         buffer = ""
+
+             # Final chunk with finish_reason="stop"
+             delta = ChoiceDelta(
+                 content=None,
+                 role=None,
+                 tool_calls=None
+             )
+
+             choice = Choice(
+                 index=0,
+                 delta=delta,
+                 finish_reason="stop",
+                 logprobs=None
+             )
+
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 system_fingerprint=None
+             )
+
+             if hasattr(chunk, "model_dump"):
+                 chunk_dict = chunk.model_dump(exclude_none=True)
+             else:
+                 chunk_dict = chunk.dict(exclude_none=True)
+             chunk_dict["usage"] = {
+                 "prompt_tokens": prompt_tokens,
+                 "completion_tokens": completion_tokens,
+                 "total_tokens": total_tokens,
+                 "estimated_cost": None
+             }
+
+             yield chunk
+
+         except Exception as e:
+             print(f"Error during ChatGPTClone stream request: {e}")
+             raise IOError(f"ChatGPTClone request failed: {e}") from e
+         finally:
+             self._client.session.proxies = original_proxies
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+         timeout: Optional[int] = None, proxies: Optional[dict] = None
+     ) -> ChatCompletion:
+         original_proxies = self._client.session.proxies
+         if proxies is not None:
+             self._client.session.proxies = proxies
+         else:
+             self._client.session.proxies = {}
+         try:
+             timeout_val = timeout if timeout is not None else self._client.timeout
+             # For non-streaming, we still use streaming internally to collect the full response
+             response = self._client.session.post(
+                 f"{self._client.url}/api/chat",
+                 headers=self._client.headers,
+                 cookies=self._client.cookies,
+                 json=payload,
+                 stream=True,
+                 timeout=timeout_val
+             )
+
+             # Handle non-200 responses
+             if not response.ok:
+                 # If we get a non-200 response, try refreshing our identity once
+                 if response.status_code in [403, 429]:
+                     self._client.refresh_identity()
+                     # Retry with new identity
+                     response = self._client.session.post(
+                         f"{self._client.url}/api/chat",
+                         headers=self._client.headers,
+                         cookies=self._client.cookies,
+                         json=payload,
+                         stream=True,
+                         timeout=timeout_val
+                     )
+                     if not response.ok:
+                         raise IOError(
+                             f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
+                         )
+                 else:
+                     raise IOError(
+                         f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                     )
+
+             # Collect the full response
+             full_text = ""
+             buffer = ""
+             for line in response.iter_content():
+                 if line:
+                     if isinstance(line, bytes):
+                         line = line.decode("utf-8", errors="replace")
+                     buffer += line
+                     match = re.search(r'0:"(.*?)"', buffer)
+                     if match:
+                         content = match.group(1)
+                         full_text += content
+                         buffer = ""
+                     # If buffer gets too long, reset it to avoid memory issues
+                     elif len(buffer) > 1024:
+                         buffer = ""
+
+             # Format the text (replace escaped newlines)
+             full_text = self._client.format_text(full_text)
+
+             # Estimate token counts
+             prompt_tokens = 0
+             for msg in payload.get("messages", []):
+                 prompt_tokens += count_tokens(msg.get("content", ""))
+
+             completion_tokens = count_tokens(full_text)
+             total_tokens = prompt_tokens + completion_tokens
+
+             # Create the message object
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=full_text
+             )
+
+             # Create the choice object
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason="stop"
+             )
+
+             # Create the usage object
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=total_tokens
+             )
+
+             # Create the completion object
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+
+             return completion
+
+         except Exception as e:
+             print(f"Error during ChatGPTClone non-stream request: {e}")
+             raise IOError(f"ChatGPTClone request failed: {e}") from e
+         finally:
+             self._client.session.proxies = original_proxies
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'ChatGPTClone'):
+         self.completions = Completions(client)
+
+ class ChatGPTClone(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for ChatGPT Clone API.
+
+     Usage:
+         client = ChatGPTClone()
+         response = client.chat.completions.create(
+             model="gpt-4",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+     """
+
+     url = "https://chatgpt-clone-ten-nu.vercel.app"
+     AVAILABLE_MODELS = ["gpt-4", "gpt-3.5-turbo"]
+
+     def __init__(
+         self,
+         browser: str = "chrome",
+         impersonate: str = "chrome120"
+     ):
+         """
+         Initialize the ChatGPTClone client.
+
+         Args:
+             browser: Browser to emulate in user agent (for LitAgent fallback)
+             impersonate: Browser impersonation for curl_cffi (default: chrome120)
+         """
+         self.timeout = 30
+         self.temperature = 0.6 # Default temperature
+         self.top_p = 0.7 # Default top_p
+
+         # Use curl_cffi for Cloudflare bypass and browser impersonation
+         self.session = Session(impersonate=impersonate)
+         self.session.proxies = {}
+
+         # Use LitAgent for fingerprint if available, else fallback
+         agent = LitAgent()
+         self.fingerprint = agent.generate_fingerprint(browser)
+
+         # Use the fingerprint for headers
+         self.headers = {
+             "Accept": self.fingerprint["accept"],
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Content-Type": "application/json",
+             "DNT": "1",
+             "Origin": self.url,
+             "Referer": f"{self.url}/",
+             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+             "Sec-CH-UA-Mobile": "?0",
+             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+             "User-Agent": self.fingerprint["user_agent"],
+         }
+
+         # Create session cookies with unique identifiers
+         self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
+
+         # Set consistent headers for the scraper session
+         for header, value in self.headers.items():
+             self.session.headers[header] = value
+
+         # Initialize the chat interface
+         self.chat = Chat(self)
+
+     def refresh_identity(self, browser: str = None, impersonate: str = None):
+         """Refreshes the browser identity fingerprint and curl_cffi session."""
+         browser = browser or self.fingerprint.get("browser_type", "chrome")
+         impersonate = impersonate or "chrome120"
+         self.fingerprint = LitAgent().generate_fingerprint(browser)
+         self.session = Session(impersonate=impersonate)
+         # Update headers with new fingerprint
+         self.headers.update({
+             "Accept": self.fingerprint["accept"],
+             "Accept-Language": self.fingerprint["accept_language"],
+             "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
+             "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+             "User-Agent": self.fingerprint["user_agent"],
+         })
+
+         # Update session headers
+         for header, value in self.headers.items():
+             self.session.headers[header] = value
+
+         # Generate new cookies
+         self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}
+
+         return self.fingerprint
+
+     def format_text(self, text: str) -> str:
+         """
+         Format text by replacing escaped newlines with actual newlines.
+
+         Args:
+             text: Text to format
+
+         Returns:
+             Formatted text
+         """
+         # Use a more comprehensive approach to handle all escape sequences
+         try:
+             # First handle double backslashes to avoid issues
+             text = text.replace('\\\\', '\\')
+
+             # Handle common escape sequences
+             text = text.replace('\\n', '\n')
+             text = text.replace('\\r', '\r')
+             text = text.replace('\\t', '\t')
+             text = text.replace('\\"', '"')
+             text = text.replace("\\'\'", "'")
+
+             # Handle any remaining escape sequences using JSON decoding
+             # This is a fallback in case there are other escape sequences
+             try:
+                 # Add quotes to make it a valid JSON string
+                 json_str = f'"{text}"'
+                 # Use json module to decode all escape sequences
+                 decoded = json.loads(json_str)
+                 return decoded
+             except json.JSONDecodeError:
+                 # If JSON decoding fails, return the text with the replacements we've already done
+                 return text
+         except Exception as e:
+             # If any error occurs, return the original text
+             print(f"Warning: Error formatting text: {e}")
+             return text
+
+     def convert_model_name(self, model: str) -> str:
+         """
+         Convert model names to ones supported by ChatGPTClone.
+
+         Args:
+             model: Model name to convert
+
+         Returns:
+             ChatGPTClone model name
+         """
+         # If the model is already a valid ChatGPTClone model, return it
+         if model in self.AVAILABLE_MODELS:
+             return model
+
+         # Map similar models to supported ones
+         if model.startswith("gpt-4"):
+             return "gpt-4"
+         elif model.startswith("gpt-3.5"):
+             return "gpt-3.5-turbo"
+
+         # Default to the most capable model
+         print(f"Warning: Unknown model '{model}'. Using 'gpt-4' instead.")
+         return "gpt-4"
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return type(self).AVAILABLE_MODELS
+
+         return _ModelList()
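The hunk above corresponds to webscout/Provider/OPENAI/chatgptclone.py (entry 46 in the file list, +512 -487). In 8.3 the provider drops the constructor-level timeout (the client now defaults to 30 seconds), accepts per-request timeout and proxies arguments on chat.completions.create(), estimates usage with count_tokens instead of whitespace splitting, and serializes chunks through Pydantic's model_dump()/dict(). The sketch below is illustrative only, not taken from the package documentation: it assumes webscout 8.3 is installed, that the import path matches the file listed above, and that streamed chunks expose choices[0].delta.content as the classes in the diff suggest; treat the exact import path and field access as assumptions rather than documented API.

    from webscout.Provider.OPENAI.chatgptclone import ChatGPTClone

    client = ChatGPTClone()  # 8.3: no timeout argument; the client-level default is 30s

    # timeout and proxies are the new per-request create() parameters in 8.3.
    stream = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Hello!"}],
        stream=True,
        timeout=60,    # overrides the 30s default for this call only
        proxies=None,  # e.g. {"https": "http://127.0.0.1:8080"} to route through a proxy
    )
    for chunk in stream:
        delta = chunk.choices[0].delta
        if delta and delta.content:
            print(delta.content, end="", flush=True)

For a non-streaming call, the same create() call without stream=True returns a ChatCompletion whose choices[0].message.content holds the full reply, with estimated usage attached via count_tokens.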