webscout-8.3.5-py3-none-any.whl → webscout-8.3.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of webscout has been flagged as potentially problematic.
Files changed (159):
  1. webscout/AIutel.py +2 -0
  2. webscout/Bard.py +12 -6
  3. webscout/DWEBS.py +66 -57
  4. webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
  5. webscout/Provider/AISEARCH/__init__.py +18 -11
  6. webscout/Provider/AISEARCH/scira_search.py +3 -1
  7. webscout/Provider/Aitopia.py +2 -3
  8. webscout/Provider/Andi.py +3 -3
  9. webscout/Provider/ChatGPTClone.py +1 -1
  10. webscout/Provider/ChatSandbox.py +1 -0
  11. webscout/Provider/Cloudflare.py +1 -1
  12. webscout/Provider/Cohere.py +1 -0
  13. webscout/Provider/Deepinfra.py +13 -10
  14. webscout/Provider/ExaAI.py +1 -1
  15. webscout/Provider/ExaChat.py +1 -80
  16. webscout/Provider/Flowith.py +6 -1
  17. webscout/Provider/Gemini.py +7 -5
  18. webscout/Provider/GeminiProxy.py +1 -0
  19. webscout/Provider/GithubChat.py +4 -1
  20. webscout/Provider/Groq.py +1 -1
  21. webscout/Provider/HeckAI.py +8 -4
  22. webscout/Provider/Jadve.py +23 -38
  23. webscout/Provider/K2Think.py +308 -0
  24. webscout/Provider/Koboldai.py +8 -186
  25. webscout/Provider/LambdaChat.py +2 -4
  26. webscout/Provider/Nemotron.py +3 -4
  27. webscout/Provider/Netwrck.py +6 -8
  28. webscout/Provider/OLLAMA.py +1 -0
  29. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  30. webscout/Provider/OPENAI/FalconH1.py +2 -7
  31. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  32. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  33. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  34. webscout/Provider/OPENAI/PI.py +5 -4
  35. webscout/Provider/OPENAI/Qwen3.py +2 -3
  36. webscout/Provider/OPENAI/README.md +2 -1
  37. webscout/Provider/OPENAI/TogetherAI.py +52 -57
  38. webscout/Provider/OPENAI/TwoAI.py +3 -4
  39. webscout/Provider/OPENAI/__init__.py +17 -56
  40. webscout/Provider/OPENAI/ai4chat.py +313 -303
  41. webscout/Provider/OPENAI/base.py +9 -29
  42. webscout/Provider/OPENAI/chatgpt.py +7 -2
  43. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  44. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  45. webscout/Provider/OPENAI/deepinfra.py +12 -6
  46. webscout/Provider/OPENAI/e2b.py +60 -8
  47. webscout/Provider/OPENAI/flowith.py +4 -3
  48. webscout/Provider/OPENAI/generate_api_key.py +48 -0
  49. webscout/Provider/OPENAI/heckai.py +4 -1
  50. webscout/Provider/OPENAI/netwrck.py +9 -12
  51. webscout/Provider/OPENAI/refact.py +274 -0
  52. webscout/Provider/OPENAI/scirachat.py +6 -0
  53. webscout/Provider/OPENAI/textpollinations.py +3 -14
  54. webscout/Provider/OPENAI/toolbaz.py +14 -10
  55. webscout/Provider/OpenGPT.py +1 -1
  56. webscout/Provider/Openai.py +150 -402
  57. webscout/Provider/PI.py +1 -0
  58. webscout/Provider/Perplexitylabs.py +1 -2
  59. webscout/Provider/QwenLM.py +107 -89
  60. webscout/Provider/STT/__init__.py +17 -2
  61. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  62. webscout/Provider/StandardInput.py +1 -1
  63. webscout/Provider/TTI/__init__.py +18 -12
  64. webscout/Provider/TTI/bing.py +14 -2
  65. webscout/Provider/TTI/together.py +10 -9
  66. webscout/Provider/TTS/README.md +0 -1
  67. webscout/Provider/TTS/__init__.py +18 -11
  68. webscout/Provider/TTS/base.py +479 -159
  69. webscout/Provider/TTS/deepgram.py +409 -156
  70. webscout/Provider/TTS/elevenlabs.py +425 -111
  71. webscout/Provider/TTS/freetts.py +317 -140
  72. webscout/Provider/TTS/gesserit.py +192 -128
  73. webscout/Provider/TTS/murfai.py +248 -113
  74. webscout/Provider/TTS/openai_fm.py +347 -129
  75. webscout/Provider/TTS/speechma.py +620 -586
  76. webscout/Provider/TeachAnything.py +1 -0
  77. webscout/Provider/TextPollinationsAI.py +5 -15
  78. webscout/Provider/TogetherAI.py +136 -142
  79. webscout/Provider/TwoAI.py +53 -309
  80. webscout/Provider/TypliAI.py +2 -1
  81. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  82. webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
  83. webscout/Provider/Venice.py +2 -1
  84. webscout/Provider/VercelAI.py +1 -0
  85. webscout/Provider/WiseCat.py +2 -1
  86. webscout/Provider/WrDoChat.py +2 -1
  87. webscout/Provider/__init__.py +18 -174
  88. webscout/Provider/ai4chat.py +1 -1
  89. webscout/Provider/akashgpt.py +7 -10
  90. webscout/Provider/cerebras.py +194 -38
  91. webscout/Provider/chatglm.py +170 -83
  92. webscout/Provider/cleeai.py +1 -2
  93. webscout/Provider/deepseek_assistant.py +1 -1
  94. webscout/Provider/elmo.py +1 -1
  95. webscout/Provider/geminiapi.py +1 -1
  96. webscout/Provider/granite.py +1 -1
  97. webscout/Provider/hermes.py +1 -3
  98. webscout/Provider/julius.py +1 -0
  99. webscout/Provider/learnfastai.py +1 -1
  100. webscout/Provider/llama3mitril.py +1 -1
  101. webscout/Provider/llmchat.py +1 -1
  102. webscout/Provider/llmchatco.py +1 -1
  103. webscout/Provider/meta.py +3 -3
  104. webscout/Provider/oivscode.py +2 -2
  105. webscout/Provider/scira_chat.py +51 -124
  106. webscout/Provider/searchchat.py +1 -0
  107. webscout/Provider/sonus.py +1 -1
  108. webscout/Provider/toolbaz.py +15 -11
  109. webscout/Provider/turboseek.py +31 -22
  110. webscout/Provider/typefully.py +2 -1
  111. webscout/Provider/x0gpt.py +1 -0
  112. webscout/Provider/yep.py +2 -1
  113. webscout/conversation.py +22 -20
  114. webscout/sanitize.py +14 -10
  115. webscout/scout/README.md +20 -23
  116. webscout/scout/core/crawler.py +125 -38
  117. webscout/scout/core/scout.py +26 -5
  118. webscout/tempid.py +6 -0
  119. webscout/version.py +1 -1
  120. webscout/webscout_search.py +13 -6
  121. webscout/webscout_search_async.py +10 -8
  122. webscout/yep_search.py +13 -5
  123. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/METADATA +3 -1
  124. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/RECORD +132 -155
  125. webscout/Provider/AllenAI.py +0 -440
  126. webscout/Provider/Blackboxai.py +0 -793
  127. webscout/Provider/FreeGemini.py +0 -250
  128. webscout/Provider/Glider.py +0 -225
  129. webscout/Provider/Hunyuan.py +0 -283
  130. webscout/Provider/MCPCore.py +0 -322
  131. webscout/Provider/MiniMax.py +0 -207
  132. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  133. webscout/Provider/OPENAI/MiniMax.py +0 -298
  134. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  135. webscout/Provider/OPENAI/c4ai.py +0 -394
  136. webscout/Provider/OPENAI/copilot.py +0 -305
  137. webscout/Provider/OPENAI/glider.py +0 -330
  138. webscout/Provider/OPENAI/mcpcore.py +0 -431
  139. webscout/Provider/OPENAI/multichat.py +0 -378
  140. webscout/Provider/Reka.py +0 -214
  141. webscout/Provider/TTS/sthir.py +0 -94
  142. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  143. webscout/Provider/asksteve.py +0 -220
  144. webscout/Provider/copilot.py +0 -422
  145. webscout/Provider/freeaichat.py +0 -294
  146. webscout/Provider/koala.py +0 -182
  147. webscout/Provider/lmarena.py +0 -198
  148. webscout/Provider/monochat.py +0 -275
  149. webscout/Provider/multichat.py +0 -375
  150. webscout/Provider/scnet.py +0 -244
  151. webscout/Provider/talkai.py +0 -194
  152. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  153. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  154. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  155. /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
  156. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  157. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  158. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  159. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/base.py

@@ -1,6 +1,7 @@
 from abc import ABC, abstractmethod
 from typing import List, Dict, Optional, Union, Generator, Any, TypedDict, Callable
 import json
+import requests
 from dataclasses import dataclass

 # Import WebScout Litlogger instead of standard logging
@@ -8,13 +9,6 @@ from webscout.Litlogger import Logger, LogLevel

 logger = Logger(name="OpenAIBase", level=LogLevel.INFO)

-# Import the LitMeta metaclass from Litproxy
-try:
-    from litproxy import LitMeta
-except ImportError:
-    from .autoproxy import ProxyAutoMeta as LitMeta
-
-
 # Import the utils for response structures
 from webscout.Provider.OPENAI.utils import ChatCompletion, ChatCompletionChunk

@@ -182,39 +176,25 @@ class BaseChat(ABC):
     completions: BaseCompletions


-class OpenAICompatibleProvider(ABC, metaclass=LitMeta):
+class OpenAICompatibleProvider(ABC):
     """
     Abstract Base Class for providers mimicking the OpenAI Python client structure.
     Requires a nested 'chat.completions' structure with tool support.
-    All subclasses automatically get proxy support via LitMeta.
-
-    # Available proxy helpers:
-    # - self.get_proxied_session() - returns a requests.Session with proxies
-    # - self.get_proxied_curl_session() - returns a curl_cffi.Session with proxies
-    # - self.get_proxied_curl_async_session() - returns a curl_cffi.AsyncSession with proxies
-
-    # Proxy support is automatically injected into:
-    # - requests.Session objects
-    # - httpx.Client objects
-    # - curl_cffi.requests.Session objects
-    # - curl_cffi.requests.AsyncSession objects
-    #
-    # Inbuilt auto-retry is also enabled for all requests.Session and curl_cffi.Session objects.
+    Users can provide their own proxies via the proxies parameter.
     """
     chat: BaseChat
     available_tools: Dict[str, Tool] = {}  # Dictionary of available tools
     supports_tools: bool = False  # Whether the provider supports tools
     supports_tool_choice: bool = False  # Whether the provider supports tool_choice

-    @abstractmethod
-    def __init__(self, api_key: Optional[str] = None, tools: Optional[List[Tool]] = None, proxies: Optional[dict] = None, disable_auto_proxy: bool = False, **kwargs: Any):
+    def __init__(self, api_key: Optional[str] = None, tools: Optional[List[Tool]] = None, proxies: Optional[dict] = None, **kwargs: Any):
         self.available_tools = {}
         if tools:
             self.register_tools(tools)
-        # self.proxies is set by ProxyAutoMeta
-        # Subclasses should use self.proxies for all network requests
-        # Optionally, use self.get_proxied_session() for a requests.Session with proxies
-        # The disable_auto_proxy parameter is handled by ProxyAutoMeta
+        self.proxies = proxies or {}
+        self.session = requests.Session()
+        if self.proxies:
+            self.session.proxies.update(self.proxies)
         # raise NotImplementedError  # <-- Commented out for metaclass test

     @property
@@ -266,4 +246,4 @@ class OpenAICompatibleProvider(ABC, metaclass=LitMeta):
                 updated_messages.append(tool_message)
                 break

-        return updated_messages
+        return updated_messages
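The net effect of the base.py change: proxy support moves out of the LitMeta metaclass into a plain __init__ that stores a caller-supplied proxies dict and applies it to a requests.Session. A minimal standalone sketch of that wiring (illustrative only, not code shipped in the package):

    from typing import Optional
    import requests

    class DemoProvider:
        """Stand-in for OpenAICompatibleProvider's new proxy handling."""
        def __init__(self, proxies: Optional[dict] = None):
            self.proxies = proxies or {}
            self.session = requests.Session()
            if self.proxies:
                # requests consults session.proxies for every request sent
                self.session.proxies.update(self.proxies)

    client = DemoProvider(proxies={"http": "http://127.0.0.1:8080",
                                   "https": "http://127.0.0.1:8080"})
    print(client.session.proxies)

Subclasses that previously relied on auto-injected proxying now call super().__init__(proxies=proxies), as the chatgpt.py and chatgptclone.py hunks below show.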
webscout/Provider/OPENAI/chatgpt.py

@@ -558,11 +558,16 @@ class ChatGPT(OpenAICompatibleProvider):
     """

     def __init__(
-        self
+        self,
+        proxies: Optional[Dict[str, str]] = None
     ):
         """
         Initialize the ChatGPT client.
+
+        Args:
+            proxies: Optional proxy configuration dict, e.g. {"http": "http://proxy:8080", "https": "https://proxy:8080"}
         """
+        super().__init__(proxies=proxies)
         # Initialize chat interface
         self.chat = Chat(self)

@@ -585,4 +590,4 @@ if __name__ == "__main__":
         messages=[{"role": "user", "content": "How manr r in strawberry"}]
     )
     print(response.choices[0].message.content)
-    print()
+    print()
webscout/Provider/OPENAI/chatgptclone.py

@@ -373,7 +373,8 @@ class ChatGPTClone(OpenAICompatibleProvider):
     def __init__(
         self,
         browser: str = "chrome",
-        impersonate: str = "chrome120"
+        impersonate: str = "chrome120",
+        proxies: Optional[Dict[str, str]] = None
     ):
         """
         Initialize the ChatGPTClone client.
@@ -381,15 +382,13 @@ class ChatGPTClone(OpenAICompatibleProvider):
         Args:
             browser: Browser to emulate in user agent (for LitAgent fallback)
             impersonate: Browser impersonation for curl_cffi (default: chrome120)
+            proxies: Optional proxy configuration dict, e.g. {"http": "http://proxy:8080", "https": "https://proxy:8080"}
         """
+        super().__init__(proxies=proxies)
         self.timeout = 30
         self.temperature = 0.6  # Default temperature
         self.top_p = 0.7  # Default top_p

-        # Use curl_cffi for Cloudflare bypass and browser impersonation
-        self.session = Session(impersonate=impersonate)
-        self.session.proxies = {}
-
         # Use LitAgent for fingerprint if available, else fallback
         agent = LitAgent()
         self.fingerprint = agent.generate_fingerprint(browser)
@@ -520,5 +519,3 @@ if __name__ == "__main__":
     )
     print(response.choices[0].message.content)
     print()
-    print("Proxies on instance:", client.proxies)
-    print("Proxies on session:", client.session.proxies)
webscout/Provider/OPENAI/chatsandbox.py

@@ -2,8 +2,8 @@ from typing import List, Dict, Optional, Union, Generator, Any
 import time
 import json
 from webscout.litagent import LitAgent
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
     ChatCompletion,
     ChatCompletionChunk,
     Choice,
@@ -13,7 +13,10 @@ from .utils import (
     format_prompt,
     count_tokens
 )
-import requests
+from curl_cffi.requests import Session
+from curl_cffi.const import CurlHttpVersion
+from webscout.AIutel import sanitize_stream
+from webscout import exceptions

 # ANSI escape codes for formatting
 BOLD = "\033[1m"
@@ -24,6 +27,19 @@ class Completions(BaseCompletions):
     def __init__(self, client: 'ChatSandbox'):
         self._client = client

+    @staticmethod
+    def _chatsandbox_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the chatsandbox stream format."""
+        if isinstance(chunk, str):
+            try:
+                data = json.loads(chunk)
+                if isinstance(data, dict) and "reasoning_content" in data:
+                    return data["reasoning_content"]
+                return chunk
+            except json.JSONDecodeError:
+                return chunk
+        return None
+
     def create(
         self,
         *,
@@ -43,7 +59,7 @@ class Completions(BaseCompletions):
         # Use model name conversion for compatibility
         model = self._client.convert_model_name(model)
         # Compose the conversation prompt using format_prompt
-        question = format_prompt(messages, add_special_tokens=True)
+        question = format_prompt(messages, add_special_tokens=False, do_continue=True)
         payload = {
             "messages": [question],
             "character": model
@@ -70,7 +86,7 @@ class Completions(BaseCompletions):
             'dnt': '1',
             'sec-gpc': '1',
         }
-        session = requests.Session()
+        session = Session()
         session.headers.update(headers)
         session.proxies = proxies if proxies is not None else {}

@@ -80,29 +96,37 @@ class Completions(BaseCompletions):
                     url,
                     json=payload,
                     stream=True,
-                    timeout=timeout if timeout is not None else 30
+                    timeout=timeout if timeout is not None else 30,
+                    impersonate="chrome120",
+                    http_version=CurlHttpVersion.V1_1
                 )
-                response.raise_for_status()
-                streaming_text = ""
-                for chunk in response.iter_content(chunk_size=None):
-                    if not chunk:
-                        continue
-                    text = chunk.decode('utf-8', errors='replace')
-                    try:
-                        data = json.loads(text)
-                        content = data.get("reasoning_content", text)
-                    except Exception:
-                        content = text
-                    streaming_text += content
-                    delta = ChoiceDelta(content=content)
-                    choice = Choice(index=0, delta=delta, finish_reason=None)
-                    chunk_obj = ChatCompletionChunk(
-                        id=request_id,
-                        choices=[choice],
-                        created=created_time,
-                        model=model,
+                if not response.ok:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                     )
-                    yield chunk_obj
+
+                streaming_text = ""
+                # Use sanitize_stream with the custom extractor
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
+                    intro_value=None,  # No simple prefix to remove here
+                    to_json=False,  # Content is not JSON
+                    content_extractor=self._chatsandbox_extractor  # Use the specific extractor
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        delta = ChoiceDelta(content=content_chunk)
+                        choice = Choice(index=0, delta=delta, finish_reason=None)
+                        chunk_obj = ChatCompletionChunk(
+                            id=request_id,
+                            choices=[choice],
+                            created=created_time,
+                            model=model,
+                        )
+                        yield chunk_obj
+
                 # Final chunk
                 delta = ChoiceDelta(content=None)
                 choice = Choice(index=0, delta=delta, finish_reason="stop")
@@ -116,39 +140,28 @@ class Completions(BaseCompletions):
             except Exception as e:
                 raise RuntimeError(f"ChatSandbox streaming request failed: {e}")
         def for_non_stream():
-            try:
-                response = session.post(
-                    url,
-                    json=payload,
-                    timeout=timeout if timeout is not None else 30
-                )
-                response.raise_for_status()
-                text = response.text
-                try:
-                    data = json.loads(text)
-                    content = data.get("reasoning_content", text)
-                except Exception:
-                    content = text
-                prompt_tokens = count_tokens(question)
-                completion_tokens = count_tokens(content)
-                total_tokens = prompt_tokens + completion_tokens
-                usage = CompletionUsage(
-                    prompt_tokens=prompt_tokens,
-                    completion_tokens=completion_tokens,
-                    total_tokens=total_tokens
-                )
-                message = ChatCompletionMessage(role="assistant", content=content)
-                choice = Choice(index=0, message=message, finish_reason="stop")
-                completion = ChatCompletion(
-                    id=request_id,
-                    choices=[choice],
-                    created=created_time,
-                    model=model,
-                    usage=usage,
-                )
-                return completion
-            except Exception as e:
-                raise RuntimeError(f"ChatSandbox request failed: {e}")
+            streaming_text = ""
+            for chunk_obj in for_stream():
+                if chunk_obj.choices[0].delta.content:
+                    streaming_text += chunk_obj.choices[0].delta.content
+            prompt_tokens = count_tokens(question)
+            completion_tokens = count_tokens(streaming_text)
+            total_tokens = prompt_tokens + completion_tokens
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+            message = ChatCompletionMessage(role="assistant", content=streaming_text)
+            choice = Choice(index=0, message=message, finish_reason="stop")
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+            return completion
         return for_stream() if stream else for_non_stream()

 class Chat(BaseChat):
@@ -175,3 +188,15 @@ class ChatSandbox(OpenAICompatibleProvider):
         # Default to openai if no match
         print(f"{RED}{BOLD}Warning: Model '{model}' not found, using default model 'openai'{RESET}")
         return "openai"
+
+if __name__ == "__main__":
+    client = ChatSandbox()
+    response = client.chat.completions.create(
+        model="openai",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": "Explain the theory of relativity in simple terms."}
+        ],
+        stream=False
+    )
+    print(response.choices[0].message.content)
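The streaming path above now delegates chunk handling to sanitize_stream with the custom _chatsandbox_extractor, and the non-stream path simply aggregates the streamed chunks. The extractor's contract is easy to check in isolation; this is a standalone re-implementation of the function shown in the hunk, for illustration:

    import json
    from typing import Any, Dict, Optional, Union

    def extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        # Mirrors Completions._chatsandbox_extractor from the diff above
        if isinstance(chunk, str):
            try:
                data = json.loads(chunk)
                if isinstance(data, dict) and "reasoning_content" in data:
                    return data["reasoning_content"]
                return chunk
            except json.JSONDecodeError:
                return chunk
        return None

    print(extractor('{"reasoning_content": "Hello"}'))  # -> Hello
    print(extractor("plain text chunk"))                # -> plain text chunk
    print(extractor({"not": "a string"}))               # -> None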
webscout/Provider/OPENAI/deepinfra.py

@@ -195,9 +195,10 @@ class Chat(BaseChat):

 class DeepInfra(OpenAICompatibleProvider):
     AVAILABLE_MODELS = [
-        "anthropic/claude-4-opus",
         "moonshotai/Kimi-K2-Instruct",
-        "anthropic/claude-4-sonnet",
+        "Qwen/Qwen3-Next-80B-A3B-Instruct",
+        "Qwen/Qwen3-Next-80B-A3B-Thinking",
+        "moonshotai/Kimi-K2-Instruct-0905",
         "deepseek-ai/DeepSeek-R1-0528-Turbo",
         "Qwen/Qwen3-235B-A22B-Thinking-2507",
         "Qwen/Qwen3-Coder-480B-A35B-Instruct",
@@ -217,14 +218,13 @@ class DeepInfra(OpenAICompatibleProvider):
         "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
         "microsoft/phi-4-reasoning-plus",
         "Qwen/QwQ-32B",
-        "google/gemini-2.5-flash",
-        "google/gemini-2.5-pro",
         "google/gemma-3-27b-it",
         "google/gemma-3-12b-it",
         "google/gemma-3-4b-it",
         "microsoft/Phi-4-multimodal-instruct",
         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
         "deepseek-ai/DeepSeek-V3",
+        "deepseek-ai/DeepSeek-V3.1",
         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
         "meta-llama/Llama-3.3-70B-Instruct",
         "microsoft/phi-4",
@@ -238,7 +238,6 @@ class DeepInfra(OpenAICompatibleProvider):
         "Sao10K/L3-8B-Lunaris-v1-Turbo",
         "Sao10K/L3.1-70B-Euryale-v2.2",
         "Sao10K/L3.3-70B-Euryale-v2.3",
-        "anthropic/claude-3-7-sonnet-latest",
         "deepseek-ai/DeepSeek-R1",
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
         "deepseek-ai/DeepSeek-R1-Turbo",
@@ -261,7 +260,14 @@ class DeepInfra(OpenAICompatibleProvider):
         "mistralai/Mistral-Small-3.2-24B-Instruct-2506",
         "mistralai/Mixtral-8x7B-Instruct-v0.1",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
+        "zai-org/GLM-4.5-Air",
+        "zai-org/GLM-4.5",
+        "zai-org/GLM-4.5V",
+        "openai/gpt-oss-120b",
+        "openai/gpt-oss-20b",
+        "allenai/olmOCR-7B-0725-FP8",
     ]
+
     def __init__(self, browser: str = "chrome", api_key: str = None):
         self.timeout = None
         self.base_url = "https://api.deepinfra.com/v1/openai/chat/completions"
@@ -306,4 +312,4 @@ if __name__ == "__main__":
         max_tokens=10000,
         stream=False
     )
-    print(response)
+    print(response.choices[0].message.content)
webscout/Provider/OPENAI/e2b.py

@@ -114,6 +114,35 @@ MODEL_PROMPT = {
             }
         }
     },
+    "claude-opus-4-1-20250805": {
+        "apiUrl": "https://fragments.e2b.dev/api/chat",
+        "id": "claude-opus-4-1-20250805",
+        "name": "Claude Opus 4.1",
+        "Knowledge": "2024-10",
+        "provider": "Anthropic",
+        "providerId": "anthropic",
+        "multiModal": True,
+        "templates": {
+            "system": {
+                "intro": "You are Claude Opus 4.1, Anthropic's most capable AI assistant for complex reasoning and analysis. You excel at sophisticated problem-solving, creative thinking, and providing nuanced insights across a wide range of domains. You can analyze images, code, and complex data to deliver comprehensive and thoughtful responses.",
+                "principles": ["honesty", "ethics", "diligence", "helpfulness", "accuracy", "thoughtfulness", "creativity"],
+                "latex": {
+                    "inline": "\\(\\nabla \\cdot \\vec{E} = \\frac{\\rho}{\\epsilon_0}\\)",
+                    "block": "\\begin{align}\n\\nabla \\cdot \\vec{E} &= \\frac{\\rho}{\\epsilon_0} \\\\\n\\nabla \\times \\vec{B} &= \\mu_0\\vec{J} + \\mu_0\\epsilon_0\\frac{\\partial\\vec{E}}{\\partial t} \\\\\nE &= mc^2 \\\\\n\\psi(x,t) &= Ae^{i(kx-\\omega t)}\n\\end{align}"
+                }
+            }
+        },
+        "requestConfig": {
+            "template": {
+                "txt": {
+                    "name": "chat with users and start role-playing, Above of all: Follow the latest news from users",
+                    "lib": [""],
+                    "file": "pages/ChatWithUsers.txt",
+                    "port": 3000
+                }
+            }
+        }
+    },
     "o1-mini": {
         "apiUrl": "https://fragments.e2b.dev/api/chat",
         "id": "o1-mini",
@@ -1013,6 +1042,10 @@ class Completions(BaseCompletions):
         """Enhanced request method with IP rotation, session rotation, and advanced rate limit bypass."""
         url = model_config["apiUrl"]
         target_origin = "https://fragments.e2b.dev"
+
+        # Use client proxies if none provided
+        if proxies is None:
+            proxies = getattr(self._client, "proxies", None)

         for attempt in range(retries):
             try:
@@ -1055,13 +1088,13 @@ class Completions(BaseCompletions):

                 json_data = json.dumps(enhanced_request_body)

-                # Use curl_cffi session with enhanced fingerprinting
+                # Use curl_cffi session with enhanced fingerprinting and proxy support
                 response = self._client.session.post(
                     url=url,
                     headers=headers,
                     data=json_data,
                     timeout=timeout or self._client.timeout,
-                    proxies=proxies or getattr(self._client, "proxies", None),
+                    proxies=proxies,
                     impersonate=self._client.impersonation
                 )

@@ -1225,17 +1258,21 @@ class E2B(OpenAICompatibleProvider):
         'deepseek-r1-instruct': 'deepseek-r1'
     }

-    def __init__(self, retries: int = 3):
+    def __init__(self, retries: int = 3, proxies: Optional[Dict[str, str]] = None, **kwargs):
         """
         Initialize the E2B client with curl_cffi and browser fingerprinting.

         Args:
             retries: Number of retries for failed requests.
+            proxies: Proxy configuration for requests.
+            **kwargs: Additional arguments passed to parent class.
         """
         self.timeout = 60  # Default timeout in seconds
-        self.proxies = None  # Default proxies
         self.retries = retries
-
+
+        # Handle proxy configuration
+        self.proxies = proxies or {}
+
         # Use LitAgent for user-agent
         self.headers = LitAgent().generate_fingerprint()

@@ -1243,6 +1280,20 @@ class E2B(OpenAICompatibleProvider):
         self.impersonation = curl_requests.impersonate.DEFAULT_CHROME
         self.session = curl_requests.Session()
         self.session.headers.update(self.headers)
+
+        # Apply proxy configuration if provided
+        if self.proxies:
+            self.session.proxies.update(self.proxies)
+
+        # Initialize bypass session data
+        self._session_rotation_data = {}
+        self._last_rotation_time = 0
+        self._rotation_interval = 300  # Rotate session every 5 minutes
+        self._rate_limit_failures = 0
+        self._max_rate_limit_failures = 3
+
+        # Initialize the chat interface
+        self.chat = Chat(self)

         # Initialize bypass session data
         self._session_rotation_data = {}
@@ -1589,13 +1640,13 @@ if __name__ == "__main__":
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
-    print("\n--- Streaming Simulation Test (gpt-4.1-mini) ---")
+    print("\n--- Streaming Simulation Test (claude-opus-4-1-20250805) ---")
     try:
         client_stream = E2B()
         stream = client_stream.chat.completions.create(
-            model="gpt-4.1-mini",
+            model="claude-opus-4-1-20250805",
             messages=[
-                {"role": "user", "content": "Write a poem about AI."}
+                {"role": "user", "content": "hi."}
             ],
             stream=True
         )
@@ -1607,6 +1658,7 @@ if __name__ == "__main__":
                 print(content, end="", flush=True)
                 full_stream_response += content
         print("\n--- End of Stream ---")
+        print(client_stream.proxies)
         if not full_stream_response:
             print(f"{RED}Stream test failed: No content received.{RESET}")
     except Exception as e:
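A brief usage sketch of the new E2B constructor signature (module path taken from the file list above; actual requests require network access to fragments.e2b.dev):

    from webscout.Provider.OPENAI.e2b import E2B

    # proxies is new in this release; it is stored on the instance and
    # applied to the underlying curl_cffi session
    client = E2B(retries=3, proxies={"https": "http://127.0.0.1:8080"})
    print(client.proxies)

Note that the constructor now initializes self.chat and the session-rotation fields twice: the added block duplicates the pre-existing lines that follow it as context.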
webscout/Provider/OPENAI/flowith.py

@@ -150,8 +150,9 @@ class Chat(BaseChat):

 class Flowith(OpenAICompatibleProvider):
     AVAILABLE_MODELS = [
-        "gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner", "claude-3.5-haiku",
-        "gemini-2.0-flash", "gemini-2.5-flash", "grok-3-mini"
+        "gpt-5-nano", "gpt-5-mini", "glm-4.5", "gpt-oss-120b", "gpt-oss-20b", "kimi-k2",
+        "gpt-4.1", "gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner",
+        "gemini-2.5-flash", "grok-3-mini"
     ]

     chat: Chat
@@ -170,7 +171,7 @@ if __name__ == "__main__":
     client = Flowith()
     messages = [{"role": "user", "content": "Hello, how are you?"}]
     response = client.chat.completions.create(
-        model="gpt-4.1-mini",
+        model="gpt-5-nano",
         messages=messages,
         stream=True
     )
webscout/Provider/OPENAI/generate_api_key.py (new file)

@@ -0,0 +1,48 @@
+import random
+import string
+
+def generate_api_key_suffix(length: int = 4) -> str:
+    """Generate a random API key suffix like 'C1Z5'
+
+    Args:
+        length: Length of the suffix (default: 4)
+
+    Returns:
+        A random string with uppercase letters and digits
+    """
+    # Use uppercase letters and digits for the suffix
+    chars = string.ascii_uppercase + string.digits
+    return ''.join(random.choice(chars) for _ in range(length))
+
+def generate_full_api_key(prefix: str = "EU1CW20nX5oau42xBSgm") -> str:
+    """Generate a full API key with the given prefix pattern
+
+    Args:
+        prefix: The base prefix to use (default uses the pattern from the example)
+
+    Returns:
+        A full API key string with a random suffix like 'C1Z5'
+    """
+    # Generate the suffix (last 4 characters like C1Z5)
+    suffix = generate_api_key_suffix(4)
+
+    # Combine prefix with the generated suffix
+    return prefix + suffix
+
+if __name__ == "__main__":
+    # Example usage
+    print("Generate API key suffix (like C1Z5):")
+    for i in range(5):
+        suffix = generate_api_key_suffix()
+        print(f"  {suffix}")
+
+    print("\nGenerate full API key with prefix:")
+    for i in range(5):
+        api_key = generate_full_api_key()
+        print(f"  {api_key}")
+
+    print("\nGenerate with custom prefix:")
+    custom_prefix = "EU1CW20nX5oau42xBSgm"
+    for i in range(3):
+        api_key = generate_full_api_key(custom_prefix)
+        print(f"  {api_key}")
webscout/Provider/OPENAI/heckai.py

@@ -220,7 +220,10 @@ class HeckAI(OpenAICompatibleProvider):
         "openai/gpt-4o-mini",
         "openai/gpt-4.1-mini",
         "x-ai/grok-3-mini-beta",
-        "meta-llama/llama-4-scout"
+        "meta-llama/llama-4-scout",
+        "openai/gpt-5-mini",
+        "openai/gpt-5-nano"
+
     ]

     def __init__(
webscout/Provider/OPENAI/netwrck.py

@@ -4,10 +4,9 @@ import requests
 import json
 from typing import List, Dict, Optional, Union, Generator, Any

-from webscout.Provider.yep import T
 from webscout.litagent import LitAgent
-from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
-from .utils import (
+from webscout.Provider.OPENAI.base import BaseChat, BaseCompletions, OpenAICompatibleProvider
+from webscout.Provider.OPENAI.utils import (
     ChatCompletion,
     ChatCompletionChunk,
     Choice,
@@ -205,13 +204,11 @@ class Netwrck(OpenAICompatibleProvider):

     AVAILABLE_MODELS = [
         "thedrummer/valkyrie-49b-v1",
+        "thedrummer/skyfall-36b-v2",
         "sao10k/l3-euryale-70b",
         "deepseek/deepseek-chat",
         "deepseek/deepseek-r1",
-        "anthropic/claude-sonnet-4-20250514",
-        "openai/gpt-4.1-mini",
         "gryphe/mythomax-l2-13b",
-        "google/gemini-2.5-flash-preview-04-17",
         "nvidia/llama-3.1-nemotron-70b-instruct",
     ]

@@ -308,9 +305,9 @@
             if model.lower() in available_model.lower():
                 return available_model

-        # Default to Claude if no match
-        print(f"{BOLD}Warning: Model '{model}' not found, using default model 'anthropic/claude-3-7-sonnet-20250219'{RESET}")
-        return "anthropic/claude-3-7-sonnet-20250219"
+        # Default to DeepSeek if no match
+        print(f"{BOLD}Warning: Model '{model}' not found, using default model 'deepseek/deepseek-r1'{RESET}")
+        return "deepseek/deepseek-r1"

     @property
     def models(self):
@@ -327,9 +324,9 @@ if __name__ == "__main__":

     # Test a subset of models to avoid excessive API calls
     test_models = [
-        "anthropic/claude-3-7-sonnet-20250219",
-        "openai/gpt-4o-mini",
-        "deepseek/deepseek-chat"
+        "deepseek/deepseek-r1",
+        "deepseek/deepseek-chat",
+        "gryphe/mythomax-l2-13b"
     ]

     for model in test_models: