webscout 8.2.3__py3-none-any.whl → 8.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (122)
  1. webscout/AIutel.py +226 -14
  2. webscout/Bard.py +579 -206
  3. webscout/DWEBS.py +78 -35
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Extra/tempmail/base.py +1 -1
  6. webscout/Provider/AISEARCH/hika_search.py +4 -0
  7. webscout/Provider/AISEARCH/scira_search.py +2 -5
  8. webscout/Provider/Aitopia.py +75 -51
  9. webscout/Provider/AllenAI.py +181 -147
  10. webscout/Provider/ChatGPTClone.py +97 -86
  11. webscout/Provider/ChatSandbox.py +342 -0
  12. webscout/Provider/Cloudflare.py +79 -32
  13. webscout/Provider/Deepinfra.py +135 -94
  14. webscout/Provider/ElectronHub.py +103 -39
  15. webscout/Provider/ExaChat.py +36 -20
  16. webscout/Provider/GPTWeb.py +103 -47
  17. webscout/Provider/GithubChat.py +52 -49
  18. webscout/Provider/GizAI.py +283 -0
  19. webscout/Provider/Glider.py +39 -28
  20. webscout/Provider/Groq.py +222 -91
  21. webscout/Provider/HeckAI.py +93 -69
  22. webscout/Provider/HuggingFaceChat.py +113 -106
  23. webscout/Provider/Hunyuan.py +94 -83
  24. webscout/Provider/Jadve.py +104 -79
  25. webscout/Provider/LambdaChat.py +142 -123
  26. webscout/Provider/Llama3.py +94 -39
  27. webscout/Provider/MCPCore.py +315 -0
  28. webscout/Provider/Marcus.py +95 -37
  29. webscout/Provider/Netwrck.py +94 -52
  30. webscout/Provider/OPENAI/__init__.py +4 -1
  31. webscout/Provider/OPENAI/ai4chat.py +286 -0
  32. webscout/Provider/OPENAI/chatgptclone.py +35 -14
  33. webscout/Provider/OPENAI/deepinfra.py +37 -0
  34. webscout/Provider/OPENAI/exachat.py +4 -0
  35. webscout/Provider/OPENAI/groq.py +354 -0
  36. webscout/Provider/OPENAI/heckai.py +6 -2
  37. webscout/Provider/OPENAI/mcpcore.py +376 -0
  38. webscout/Provider/OPENAI/multichat.py +368 -0
  39. webscout/Provider/OPENAI/netwrck.py +3 -1
  40. webscout/Provider/OPENAI/scirachat.py +2 -4
  41. webscout/Provider/OPENAI/textpollinations.py +20 -22
  42. webscout/Provider/OPENAI/toolbaz.py +1 -0
  43. webscout/Provider/OpenGPT.py +48 -38
  44. webscout/Provider/PI.py +178 -93
  45. webscout/Provider/PizzaGPT.py +66 -36
  46. webscout/Provider/StandardInput.py +42 -30
  47. webscout/Provider/TeachAnything.py +95 -52
  48. webscout/Provider/TextPollinationsAI.py +138 -78
  49. webscout/Provider/TwoAI.py +162 -81
  50. webscout/Provider/TypliAI.py +305 -0
  51. webscout/Provider/Venice.py +97 -58
  52. webscout/Provider/VercelAI.py +33 -14
  53. webscout/Provider/WiseCat.py +65 -28
  54. webscout/Provider/Writecream.py +37 -11
  55. webscout/Provider/WritingMate.py +135 -63
  56. webscout/Provider/__init__.py +9 -27
  57. webscout/Provider/ai4chat.py +6 -7
  58. webscout/Provider/asksteve.py +53 -44
  59. webscout/Provider/cerebras.py +77 -31
  60. webscout/Provider/chatglm.py +47 -37
  61. webscout/Provider/copilot.py +0 -3
  62. webscout/Provider/elmo.py +109 -60
  63. webscout/Provider/granite.py +102 -54
  64. webscout/Provider/hermes.py +95 -48
  65. webscout/Provider/koala.py +1 -1
  66. webscout/Provider/learnfastai.py +113 -54
  67. webscout/Provider/llama3mitril.py +86 -51
  68. webscout/Provider/llmchat.py +88 -46
  69. webscout/Provider/llmchatco.py +110 -115
  70. webscout/Provider/meta.py +41 -37
  71. webscout/Provider/multichat.py +67 -28
  72. webscout/Provider/scira_chat.py +49 -30
  73. webscout/Provider/scnet.py +106 -53
  74. webscout/Provider/searchchat.py +87 -88
  75. webscout/Provider/sonus.py +113 -63
  76. webscout/Provider/toolbaz.py +115 -82
  77. webscout/Provider/turboseek.py +90 -43
  78. webscout/Provider/tutorai.py +82 -64
  79. webscout/Provider/typefully.py +85 -35
  80. webscout/Provider/typegpt.py +118 -61
  81. webscout/Provider/uncovr.py +132 -76
  82. webscout/Provider/x0gpt.py +69 -26
  83. webscout/Provider/yep.py +79 -66
  84. webscout/cli.py +256 -0
  85. webscout/conversation.py +34 -22
  86. webscout/exceptions.py +23 -0
  87. webscout/prompt_manager.py +56 -42
  88. webscout/version.py +1 -1
  89. webscout/webscout_search.py +65 -47
  90. webscout/webscout_search_async.py +81 -126
  91. webscout/yep_search.py +93 -43
  92. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
  93. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
  94. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
  95. webscout-8.2.5.dist-info/entry_points.txt +3 -0
  96. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
  97. inferno/__init__.py +0 -6
  98. inferno/__main__.py +0 -9
  99. inferno/cli.py +0 -6
  100. webscout/Local/__init__.py +0 -12
  101. webscout/Local/__main__.py +0 -9
  102. webscout/Local/api.py +0 -576
  103. webscout/Local/cli.py +0 -516
  104. webscout/Local/config.py +0 -75
  105. webscout/Local/llm.py +0 -287
  106. webscout/Local/model_manager.py +0 -253
  107. webscout/Local/server.py +0 -721
  108. webscout/Local/utils.py +0 -93
  109. webscout/Provider/C4ai.py +0 -432
  110. webscout/Provider/ChatGPTES.py +0 -237
  111. webscout/Provider/Chatify.py +0 -175
  112. webscout/Provider/DeepSeek.py +0 -196
  113. webscout/Provider/Llama.py +0 -200
  114. webscout/Provider/Phind.py +0 -535
  115. webscout/Provider/WebSim.py +0 -228
  116. webscout/Provider/askmyai.py +0 -158
  117. webscout/Provider/gaurish.py +0 -244
  118. webscout/Provider/labyrinth.py +0 -340
  119. webscout/Provider/lepton.py +0 -194
  120. webscout/Provider/llamatutor.py +0 -192
  121. webscout-8.2.3.dist-info/entry_points.txt +0 -5
  122. {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/OPENAI/multichat.py
@@ -0,0 +1,368 @@
+import time
+import uuid
+import json
+from datetime import datetime
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage,
+    format_prompt
+)
+
+# Import curl_cffi for Cloudflare bypass
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+
+# Import LitAgent for user agent generation
+from webscout.litagent import LitAgent
+
+# ANSI escape codes for formatting
+BOLD = "\033[1m"
+RED = "\033[91m"
+RESET = "\033[0m"
+
+# Model configurations
+MODEL_CONFIGS = {
+    "llama": {
+        "endpoint": "https://www.multichatai.com/api/chat/meta",
+        "models": {
+            "llama-3.3-70b-versatile": {"contextLength": 131072},
+            "llama-3.2-11b-vision-preview": {"contextLength": 32768},
+            "deepseek-r1-distill-llama-70b": {"contextLength": 128000},
+        },
+    },
+    "cohere": {
+        "endpoint": "https://www.multichatai.com/api/chat/cohere",
+        "models": {
+            "command-r": {"contextLength": 128000},
+            "command": {"contextLength": 4096},
+        },
+    },
+    "google": {
+        "endpoint": "https://www.multichatai.com/api/chat/google",
+        "models": {
+            "gemini-1.5-flash-002": {"contextLength": 1048576},
+            "gemma2-9b-it": {"contextLength": 8192},
+            "gemini-2.0-flash": {"contextLength": 128000},
+        },
+        "message_format": "parts",
+    },
+    "deepinfra": {
+        "endpoint": "https://www.multichatai.com/api/chat/deepinfra",
+        "models": {
+            "Sao10K/L3.1-70B-Euryale-v2.2": {"contextLength": 8192},
+            "Gryphe/MythoMax-L2-13b": {"contextLength": 8192},
+            "nvidia/Llama-3.1-Nemotron-70B-Instruct": {"contextLength": 131072},
+            "deepseek-ai/DeepSeek-V3": {"contextLength": 32000},
+            "meta-llama/Meta-Llama-3.1-405B-Instruct": {"contextLength": 131072},
+            "NousResearch/Hermes-3-Llama-3.1-405B": {"contextLength": 131072},
+            "gemma-2-27b-it": {"contextLength": 8192},
+        },
+    },
+    "mistral": {
+        "endpoint": "https://www.multichatai.com/api/chat/mistral",
+        "models": {
+            "mistral-small-latest": {"contextLength": 32000},
+            "codestral-latest": {"contextLength": 32000},
+            "open-mistral-7b": {"contextLength": 8000},
+            "open-mixtral-8x7b": {"contextLength": 8000},
+        },
+    },
+    "alibaba": {
+        "endpoint": "https://www.multichatai.com/api/chat/alibaba",
+        "models": {
+            "Qwen/Qwen2.5-72B-Instruct": {"contextLength": 32768},
+            "Qwen/Qwen2.5-Coder-32B-Instruct": {"contextLength": 32768},
+            "Qwen/QwQ-32B-Preview": {"contextLength": 32768},
+        },
+    },
+}
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'MultiChatAI'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Create a chat completion using the MultiChatAI API.
+
+        Args:
+            model: The model to use
+            messages: A list of messages in the conversation
+            max_tokens: Maximum number of tokens to generate
+            stream: Whether to stream the response
+            temperature: Temperature for response generation
+            top_p: Top-p sampling parameter
+
+        Returns:
+            Either a ChatCompletion object or a generator of ChatCompletionChunk objects
+        """
+        try:
+            # Set client parameters based on function arguments
+            self._client.model = model
+            if temperature is not None:
+                self._client.temperature = temperature
+            if max_tokens is not None:
+                self._client.max_tokens_to_sample = max_tokens
+
+            # Extract system messages and set as system prompt
+            for message in messages:
+                if message.get("role") == "system":
+                    self._client.system_prompt = message.get("content", "")
+                    break
+
+            # Format all messages into a single prompt
+            user_message = format_prompt(messages)
+
+            # Generate a unique request ID
+            request_id = f"multichat-{str(uuid.uuid4())}"
+            created_time = int(time.time())
+
+            # Make the API request
+            response_text = self._client._make_api_request(user_message)
+
+            # If streaming is requested, simulate streaming with the full response
+            if stream:
+                def generate_chunks():
+                    # Create a single chunk with the full response
+                    delta = ChoiceDelta(content=response_text)
+                    choice = Choice(index=0, delta=delta, finish_reason="stop")
+                    chunk = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model,
+                    )
+                    yield chunk
+
+                return generate_chunks()
+
+            # For non-streaming, create a complete response
+            message = ChatCompletionMessage(role="assistant", content=response_text)
+            choice = Choice(index=0, message=message, finish_reason="stop")
+
+            # Estimate token usage (this is approximate)
+            prompt_tokens = len(user_message) // 4 # Rough estimate
+            completion_tokens = len(response_text) // 4 # Rough estimate
+            total_tokens = prompt_tokens + completion_tokens
+
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+
+            return completion
+
+        except Exception as e:
+            print(f"{RED}Error during MultiChatAI request: {e}{RESET}")
+            raise IOError(f"MultiChatAI request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'MultiChatAI'):
+        self.completions = Completions(client)
+
+class MultiChatAI(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for MultiChatAI API.
+
+    Usage:
+        client = MultiChatAI()
+        response = client.chat.completions.create(
+            model="llama-3.3-70b-versatile",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+        print(response.choices[0].message.content)
+    """
+
+    AVAILABLE_MODELS = [
+        # Llama Models
+        "llama-3.3-70b-versatile",
+        "llama-3.2-11b-vision-preview",
+        "deepseek-r1-distill-llama-70b",
+
+        # Google Models
+        "gemma2-9b-it",
+        "gemini-2.0-flash",
+
+        # DeepInfra Models
+        "Sao10K/L3.1-70B-Euryale-v2.2",
+        "Gryphe/MythoMax-L2-13b",
+        "nvidia/Llama-3.1-Nemotron-70B-Instruct",
+        "deepseek-ai/DeepSeek-V3",
+        "meta-llama/Meta-Llama-3.1-405B-Instruct",
+        "NousResearch/Hermes-3-Llama-3.1-405B",
+
+        # Alibaba Models
+        "Qwen/Qwen2.5-72B-Instruct",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Qwen/QwQ-32B-Preview"
+    ]
+
+    def __init__(
+        self,
+        timeout: int = 30,
+        proxies: dict = {},
+        model: str = "llama-3.3-70b-versatile",
+        system_prompt: str = "You are a friendly, helpful AI assistant.",
+        temperature: float = 0.5,
+        max_tokens: int = 4000
+    ):
+        """
+        Initialize the MultiChatAI client.
+
+        Args:
+            timeout: Request timeout in seconds
+            proxies: Optional proxy configuration
+            model: Default model to use
+            system_prompt: System prompt to use
+            temperature: Temperature for response generation
+            max_tokens: Maximum number of tokens to generate
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+        self.timeout = timeout
+        self.model = model
+        self.system_prompt = system_prompt
+        self.temperature = temperature
+        self.max_tokens_to_sample = max_tokens
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+
+        self.headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "text/plain;charset=UTF-8",
+            "origin": "https://www.multichatai.com",
+            "referer": "https://www.multichatai.com/",
+            "user-agent": self.agent.random(),
+        }
+
+        # Update curl_cffi session headers, proxies, and cookies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+        self.session.cookies.set("session", uuid.uuid4().hex)
+
+        # Initialize the provider based on the model
+        self.provider = self._get_provider_from_model(self.model)
+        self.model_name = self.model
+
+        # Initialize the chat interface
+        self.chat = Chat(self)
+
+    def _get_endpoint(self) -> str:
+        """Get the API endpoint for the current provider."""
+        return MODEL_CONFIGS[self.provider]["endpoint"]
+
+    def _get_chat_settings(self) -> Dict[str, Any]:
+        """Get chat settings for the current model."""
+        base_settings = MODEL_CONFIGS[self.provider]["models"][self.model_name]
+        return {
+            "model": self.model,
+            "prompt": self.system_prompt,
+            "temperature": self.temperature,
+            "contextLength": base_settings["contextLength"],
+            "includeProfileContext": True,
+            "includeWorkspaceInstructions": True,
+            "embeddingsProvider": "openai"
+        }
+
+    def _get_system_message(self) -> str:
+        """Generate system message with current date."""
+        current_date = datetime.now().strftime("%d/%m/%Y")
+        return f"Today is {current_date}.\n\nUser Instructions:\n{self.system_prompt}"
+
+    def _build_messages(self, conversation_prompt: str) -> list:
+        """Build messages array based on provider type."""
+        if self.provider == "google":
+            return [
+                {"role": "user", "parts": self._get_system_message()},
+                {"role": "model", "parts": "I will follow your instructions."},
+                {"role": "user", "parts": conversation_prompt}
+            ]
+        else:
+            return [
+                {"role": "system", "content": self._get_system_message()},
+                {"role": "user", "content": conversation_prompt}
+            ]
+
+    def _get_provider_from_model(self, model: str) -> str:
+        """Determine the provider based on the model name."""
+        for provider, config in MODEL_CONFIGS.items():
+            if model in config["models"]:
+                return provider
+
+        available_models = []
+        for provider, config in MODEL_CONFIGS.items():
+            for model_name in config["models"].keys():
+                available_models.append(f"{provider}/{model_name}")
+
+        error_msg = f"Invalid model: {model}\nAvailable models: {', '.join(available_models)}"
+        raise ValueError(error_msg)
+
+    def _make_api_request(self, prompt: str) -> str:
+        """Make the API request with proper error handling."""
+        try:
+            payload = {
+                "chatSettings": self._get_chat_settings(),
+                "messages": self._build_messages(prompt),
+                "customModelId": "",
+            }
+
+            # Use curl_cffi session post with impersonate
+            response = self.session.post(
+                self._get_endpoint(),
+                json=payload,
+                timeout=self.timeout,
+                impersonate="chrome110"
+            )
+            response.raise_for_status()
+
+            # Return the response text
+            return response.text.strip()
+
+        except CurlError as e:
+            raise IOError(f"API request failed (CurlError): {e}") from e
+        except Exception as e:
+            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+            raise IOError(f"API request failed ({type(e).__name__}): {e} - {err_text}") from e
+
+if __name__ == "__main__":
+    print(f"{BOLD}Testing MultiChatAI OpenAI-compatible provider{RESET}")
+
+    client = MultiChatAI()
+    response = client.chat.completions.create(
+        model="llama-3.3-70b-versatile",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": "Say 'Hello' in one word"}
+        ]
+    )
+
+    print(f"Response: {response.choices[0].message.content}")
@@ -203,12 +203,14 @@ class Netwrck(OpenAICompatibleProvider):
         "x-ai/grok-2",
         "anthropic/claude-3-7-sonnet-20250219",
         "sao10k/l3-euryale-70b",
-        "openai/gpt-4o-mini",
+        "openai/gpt-4.1-mini",
         "gryphe/mythomax-l2-13b",
         "google/gemini-pro-1.5",
+        "google/gemini-2.5-flash-preview-04-17",
         "nvidia/llama-3.1-nemotron-70b-instruct",
         "deepseek/deepseek-r1",
         "deepseek/deepseek-chat"
+
     ]
 
     # Default greeting used by Netwrck
@@ -324,15 +324,13 @@ class SciraChat(OpenAICompatibleProvider):
     """
 
     AVAILABLE_MODELS = {
-        "scira-default": "Grok3",
-        "scira-grok-3-mini": "Grok3-mini", # thinking model
+        "scira-default": "Grok3-mini", # thinking model
+        "scira-grok-3": "Grok3",
         "scira-vision" : "Grok2-Vision", # vision model
         "scira-4.1-mini": "GPT4.1-mini",
         "scira-qwq": "QWQ-32B",
         "scira-o4-mini": "o4-mini",
         "scira-google": "gemini 2.5 flash"
-
-
     }
 
     def __init__(
@@ -268,28 +268,26 @@ class TextPollinations(OpenAICompatibleProvider):
     """
 
     AVAILABLE_MODELS = [
-        "openai", # OpenAI GPT-4.1-nano (Azure) - vision capable
-        "openai-large", # OpenAI GPT-4.1 mini (Azure) - vision capable
-        "openai-reasoning", # OpenAI o4-mini (Azure) - vision capable, reasoning
-        "qwen-coder", # Qwen 2.5 Coder 32B (Scaleway)
-        "llama", # Llama 3.3 70B (Cloudflare)
-        "llamascout", # Llama 4 Scout 17B (Cloudflare)
-        "mistral", # Mistral Small 3 (Scaleway) - vision capable
-        "unity", # Unity Mistral Large (Scaleway) - vision capable, uncensored
-        "midijourney", # Midijourney (Azure)
-        "rtist", # Rtist (Azure)
-        "searchgpt", # SearchGPT (Azure) - vision capable
-        "evil", # Evil (Scaleway) - vision capable, uncensored
-        "deepseek-reasoning", # DeepSeek-R1 Distill Qwen 32B (Cloudflare) - reasoning
-        "deepseek-reasoning-large", # DeepSeek R1 - Llama 70B (Scaleway) - reasoning
-        "phi", # Phi-4 Instruct (Cloudflare) - vision and audio capable
-        "llama-vision", # Llama 3.2 11B Vision (Cloudflare) - vision capable
-        "gemini", # gemini-2.5-flash-preview-04-17 (Azure) - vision and audio capable
-        "hormoz", # Hormoz 8b (Modal)
-        "hypnosis-tracy", # Hypnosis Tracy 7B (Azure) - audio capable
-        "deepseek", # DeepSeek-V3 (DeepSeek)
-        "sur", # Sur AI Assistant (Mistral) (Scaleway) - vision capable
-        "openai-audio", # OpenAI GPT-4o-audio-preview (Azure) - vision and audio capable
+        "openai",
+        "openai-large",
+        "qwen-coder",
+        "llama",
+        "llamascout",
+        "mistral",
+        "unity",
+        "midijourney",
+        "rtist",
+        "searchgpt",
+        "evil",
+        "deepseek-reasoning",
+        "deepseek-reasoning-large",
+        "phi",
+        "llama-vision",
+        "hormoz",
+        "hypnosis-tracy",
+        "deepseek",
+        "sur",
+        "openai-audio",
     ]
 
     def __init__(
@@ -288,6 +288,7 @@ class Toolbaz(OpenAICompatibleProvider):
         "gemini-2.0-flash-thinking",
         "gemini-2.0-flash",
         "gemini-1.5-flash",
+        "o3-mini",
         "gpt-4o-latest",
         "gpt-4o",
         "deepseek-r1",
@@ -1,4 +1,5 @@
-import requests
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
 import json
 from typing import Dict, Generator, Union
 
@@ -9,6 +10,7 @@ from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
 
+
 class OpenGPT(Provider):
     """
     A class to interact with the Open-GPT API.
@@ -17,7 +19,7 @@ class OpenGPT(Provider):
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 600,
+        max_tokens: int = 600, # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -41,8 +43,9 @@
             act (str, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
             app_id (str, optional): The OpenGPT application ID. Defaults to "clf3yg8730000ih08ndbdi2v4".
         """
-        self.session = requests.Session()
-        self.agent = LitAgent()
+        # Initialize curl_cffi Session
+        self.session = Session()
+        self.agent = LitAgent() # Keep for potential future use or other headers
 
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -50,15 +53,17 @@
         self.last_response = {}
         self.app_id = app_id
 
-        # Set up headers with dynamic user agent
+        # Set up headers (remove User-Agent if using impersonate)
         self.headers = {
             "Content-Type": "application/json",
-            "User-Agent": self.agent.random(),
-            "Referer": f"https://open-gpt.app/id/app/{app_id}"
+            # "User-Agent": self.agent.random(), # Removed, handled by impersonate
+            "Referer": f"https://open-gpt.app/id/app/{self.app_id}",
+            # Add sec-ch-ua headers if needed for impersonation consistency
         }
 
+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
+        self.session.proxies = proxies # Assign proxies directly
 
        # Initialize optimizers
        self.__available_optimizers = (
@@ -82,7 +87,7 @@
     def ask(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # Note: API does not support streaming
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
@@ -117,31 +122,34 @@
             "userKey": "" # Assuming userKey is meant to be empty as in the original code
         }
 
+        # API does not stream, implement non-stream logic directly
         def for_non_stream():
             try:
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     "https://open-gpt.app/api/generate",
-                    data=json.dumps(payload),
-                    timeout=self.timeout
+                    # headers are set on the session
+                    data=json.dumps(payload), # Keep data as JSON string
+                    timeout=self.timeout,
+                    # proxies are set on the session
+                    impersonate="chrome110" # Use a common impersonation profile
                 )
 
-                # Raise an exception for bad status codes
-                response.raise_for_status()
+                response.raise_for_status() # Check for HTTP errors
 
+                # Use response.text which is already decoded
                 response_text = response.text
                 self.last_response = {"text": response_text}
                 self.conversation.update_chat_history(prompt, response_text)
 
-                return {"text": response_text} if not raw else {"raw": response_text}
+                # Return dict or raw string based on raw flag
+                return {"raw": response_text} if raw else {"text": response_text}
 
-            except requests.exceptions.RequestException as e:
-                # Handle potential errors during the request
-                error_msg = f"Error fetching data: {e}"
-                raise exceptions.FailedToGenerateResponseError(error_msg)
-            except Exception as e:
-                # Catch any other unexpected errors
-                error_msg = f"An unexpected error occurred: {e}"
-                raise exceptions.FailedToGenerateResponseError(error_msg)
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e
 
         # This provider doesn't support streaming, so just return non-stream
         return for_non_stream()
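
The OpenGPT changes above follow the requests-to-curl_cffi migration applied across this release: the per-request User-Agent header is dropped, the POST goes through a curl_cffi Session with impersonate="chrome110" so the TLS and HTTP fingerprint matches a real browser, and transport errors (CurlError) are caught separately from HTTP errors. A standalone sketch of that pattern, with a placeholder endpoint and payload rather than the provider's real API:

    from curl_cffi.requests import Session
    from curl_cffi import CurlError

    session = Session()
    session.headers.update({"Content-Type": "application/json"})

    try:
        response = session.post(
            "https://example.com/api/generate",  # placeholder endpoint
            json={"prompt": "hello"},            # placeholder payload
            timeout=30,
            impersonate="chrome110",  # mimic Chrome's TLS/JA3 fingerprint
        )
        response.raise_for_status()  # raise on 4xx/5xx
        print(response.text)
    except CurlError as e:
        # DNS, TLS, and timeout failures surface as CurlError
        raise IOError(f"Request failed (CurlError): {e}") from e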
@@ -149,7 +157,7 @@ class OpenGPT(Provider):
     def chat(
         self,
         prompt: str,
-        stream: bool = False,
+        stream: bool = False, # Keep stream param for interface consistency
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
@@ -165,10 +173,22 @@
         Returns:
             A string with the response text.
         """
-        response = self.ask(
-            prompt, False, optimizer=optimizer, conversationally=conversationally
+        # Since ask() now handles both stream=True/False by returning the full response dict:
+        response_data = self.ask(
+            prompt,
+            stream=False, # Call ask in non-stream mode internally
+            raw=False, # Ensure ask returns dict with 'text' key
+            optimizer=optimizer,
+            conversationally=conversationally
         )
-        return self.get_message(response)
+        # If stream=True was requested, simulate streaming by yielding the full message at once
+        if stream:
+            def stream_wrapper():
+                yield self.get_message(response_data)
+            return stream_wrapper()
+        else:
+            # If stream=False, return the full message directly
+            return self.get_message(response_data)
 
     def get_message(self, response: dict) -> str:
         """
@@ -185,15 +205,5 @@
 
 
 if __name__ == "__main__":
-    # Test the provider
-    print("-" * 80)
-    print("Testing OpenGPT provider")
-    print("-" * 80)
-
-    try:
-        test_ai = OpenGPT()
-        response = test_ai.chat("Explain quantum physics simply.")
-        print(response)
-    except Exception as e:
-        print(f"Error: {e}")
-
+    ai = OpenGPT()
+    print(ai.chat("Hello, how are you?"))
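
With the reworked chat() above, stream=True no longer changes what happens on the wire: the full reply is fetched first, then wrapped in a one-item generator so iterating callers keep working. A usage sketch with hypothetical prompts:

    ai = OpenGPT()

    # stream=False returns the full reply as a str
    print(ai.chat("Name one prime number."))

    # stream=True returns a generator that yields that same reply once
    for part in ai.chat("Name another prime number.", stream=True):
        print(part)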