webscout 8.2.9__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (63)
  1. webscout/AIauto.py +2 -2
  2. webscout/Provider/Blackboxai.py +2 -0
  3. webscout/Provider/ChatSandbox.py +2 -1
  4. webscout/Provider/Deepinfra.py +1 -1
  5. webscout/Provider/HeckAI.py +1 -1
  6. webscout/Provider/LambdaChat.py +1 -0
  7. webscout/Provider/MCPCore.py +7 -3
  8. webscout/Provider/OPENAI/BLACKBOXAI.py +1017 -766
  9. webscout/Provider/OPENAI/Cloudflare.py +31 -14
  10. webscout/Provider/OPENAI/FalconH1.py +457 -0
  11. webscout/Provider/OPENAI/FreeGemini.py +29 -13
  12. webscout/Provider/OPENAI/NEMOTRON.py +26 -14
  13. webscout/Provider/OPENAI/PI.py +427 -0
  14. webscout/Provider/OPENAI/Qwen3.py +303 -282
  15. webscout/Provider/OPENAI/TwoAI.py +29 -12
  16. webscout/Provider/OPENAI/__init__.py +3 -1
  17. webscout/Provider/OPENAI/ai4chat.py +33 -23
  18. webscout/Provider/OPENAI/api.py +78 -12
  19. webscout/Provider/OPENAI/base.py +2 -0
  20. webscout/Provider/OPENAI/c4ai.py +31 -10
  21. webscout/Provider/OPENAI/chatgpt.py +41 -22
  22. webscout/Provider/OPENAI/chatgptclone.py +32 -13
  23. webscout/Provider/OPENAI/chatsandbox.py +7 -3
  24. webscout/Provider/OPENAI/copilot.py +26 -10
  25. webscout/Provider/OPENAI/deepinfra.py +327 -321
  26. webscout/Provider/OPENAI/e2b.py +77 -99
  27. webscout/Provider/OPENAI/exaai.py +13 -10
  28. webscout/Provider/OPENAI/exachat.py +10 -6
  29. webscout/Provider/OPENAI/flowith.py +7 -3
  30. webscout/Provider/OPENAI/freeaichat.py +10 -6
  31. webscout/Provider/OPENAI/glider.py +10 -6
  32. webscout/Provider/OPENAI/heckai.py +11 -8
  33. webscout/Provider/OPENAI/llmchatco.py +9 -7
  34. webscout/Provider/OPENAI/mcpcore.py +10 -7
  35. webscout/Provider/OPENAI/multichat.py +3 -1
  36. webscout/Provider/OPENAI/netwrck.py +10 -6
  37. webscout/Provider/OPENAI/oivscode.py +12 -9
  38. webscout/Provider/OPENAI/opkfc.py +14 -3
  39. webscout/Provider/OPENAI/scirachat.py +14 -8
  40. webscout/Provider/OPENAI/sonus.py +10 -6
  41. webscout/Provider/OPENAI/standardinput.py +18 -9
  42. webscout/Provider/OPENAI/textpollinations.py +14 -7
  43. webscout/Provider/OPENAI/toolbaz.py +16 -10
  44. webscout/Provider/OPENAI/typefully.py +14 -7
  45. webscout/Provider/OPENAI/typegpt.py +10 -6
  46. webscout/Provider/OPENAI/uncovrAI.py +22 -8
  47. webscout/Provider/OPENAI/venice.py +10 -6
  48. webscout/Provider/OPENAI/writecream.py +166 -163
  49. webscout/Provider/OPENAI/x0gpt.py +367 -365
  50. webscout/Provider/OPENAI/yep.py +384 -382
  51. webscout/Provider/PI.py +2 -1
  52. webscout/Provider/__init__.py +0 -2
  53. webscout/Provider/granite.py +41 -6
  54. webscout/Provider/oivscode.py +37 -37
  55. webscout/Provider/scnet.py +1 -0
  56. webscout/version.py +1 -1
  57. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/METADATA +2 -1
  58. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/RECORD +62 -61
  59. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  60. webscout/Provider/ChatGPTGratis.py +0 -194
  61. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/entry_points.txt +0 -0
  62. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  63. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/TwoAI.py

@@ -39,6 +39,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """Create a chat completion using TwoAI."""
@@ -59,19 +61,26 @@ class Completions(BaseCompletions):
         created_time = int(time.time())
 
         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
-        return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)
+        return self._create_non_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)
 
     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
+        original_proxies = self._client.session.proxies.copy()
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
         try:
             response = self._client.session.post(
                 self._client.base_url,
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout,
+                timeout=timeout if timeout is not None else self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()
 
@@ -129,18 +138,25 @@ class Completions(BaseCompletions):
                     yield chunk
         except Exception as e:
             raise IOError(f"TwoAI request failed: {e}") from e
-        except Exception as e:
-            raise IOError(f"Error processing TwoAI stream: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies
 
     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> ChatCompletion:
+        original_proxies = self._client.session.proxies.copy()
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
         try:
             response = self._client.session.post(
                 self._client.base_url,
                 headers=self._client.headers,
                 json=payload,
-                timeout=self._client.timeout,
+                timeout=timeout if timeout is not None else self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()
             data = response.json()
@@ -179,8 +195,8 @@
             return completion
         except Exception as e:
             raise IOError(f"TwoAI request failed: {e}") from e
-        except Exception as e:
-            raise IOError(f"Error processing TwoAI response: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies
 
 
 class Chat(BaseChat):
@@ -285,12 +301,13 @@ class TwoAI(OpenAICompatibleProvider):
             raise RuntimeError("Failed to get API key from confirmation email")
         return api_key
 
-    def __init__(self, timeout: Optional[int] = None, browser: str = "chrome"):
+    def __init__(self, browser: str = "chrome"):
         api_key = self.generate_api_key()
-        self.timeout = timeout
+        self.timeout = 30
         self.base_url = "https://api.two.ai/v2/chat/completions"
         self.api_key = api_key
         self.session = Session()
+        self.session.proxies = {}
 
         headers: Dict[str, str] = {
             "Content-Type": "application/json",
webscout/Provider/OPENAI/__init__.py

@@ -37,4 +37,6 @@ from .BLACKBOXAI import *
 from .copilot import * # Add Microsoft Copilot
 from .TwoAI import *
 from .oivscode import * # Add OnRender provider
-from .Qwen3 import *
+from .Qwen3 import *
+from .FalconH1 import *
+from .PI import * # Add PI.ai provider
webscout/Provider/OPENAI/ai4chat.py

@@ -26,6 +26,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -48,18 +50,19 @@
 
         # AI4Chat doesn't support streaming, so we'll simulate it if requested
         if stream:
-            return self._create_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param)
+            return self._create_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param, timeout=timeout, proxies=proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param)
+            return self._create_non_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param, timeout=timeout, proxies=proxies)
 
     def _create_stream(
         self, request_id: str, created_time: int, model: str,
-        conversation_prompt: str, country: str, user_id: str
+        conversation_prompt: str, country: str, user_id: str,
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Simulate streaming by breaking up the full response into fixed-size character chunks."""
         try:
             # Get the full response first
-            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
+            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id, timeout=timeout, proxies=proxies)
 
             # Track token usage
             prompt_tokens = count_tokens(conversation_prompt)
@@ -133,12 +136,13 @@
 
     def _create_non_stream(
         self, request_id: str, created_time: int, model: str,
-        conversation_prompt: str, country: str, user_id: str
+        conversation_prompt: str, country: str, user_id: str,
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> ChatCompletion:
         """Get a complete response from AI4Chat."""
         try:
             # Get the full response
-            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
+            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id, timeout=timeout, proxies=proxies)
 
             # Estimate token counts
             prompt_tokens = count_tokens(conversation_prompt)
@@ -183,22 +187,31 @@
             print(f"Unexpected error during AI4Chat non-stream request: {e}")
             raise IOError(f"AI4Chat request failed: {e}") from e
 
-    def _get_ai4chat_response(self, prompt: str, country: str, user_id: str) -> str:
+    def _get_ai4chat_response(self, prompt: str, country: str, user_id: str,
+                              timeout: Optional[int] = None, proxies: Optional[dict] = None) -> str:
         """Make the actual API request to AI4Chat."""
-        # URL encode parameters
-        encoded_text = urllib.parse.quote(prompt)
-        encoded_country = urllib.parse.quote(country)
-        encoded_user_id = urllib.parse.quote(user_id)
+        timeout_val = timeout if timeout is not None else self._client.timeout
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+
+        try:
+            # URL encode parameters
+            encoded_text = urllib.parse.quote(prompt)
+            encoded_country = urllib.parse.quote(country)
+            encoded_user_id = urllib.parse.quote(user_id)
 
-        # Construct the API URL
-        url = f"{self._client.api_endpoint}?text={encoded_text}&country={encoded_country}&user_id={encoded_user_id}"
+            # Construct the API URL
+            url = f"{self._client.api_endpoint}?text={encoded_text}&country={encoded_country}&user_id={encoded_user_id}"
 
-        # Make the request
-        try:
-            response = self._client.session.get(url, headers=self._client.headers, timeout=self._client.timeout)
+            # Make the request
+            response = self._client.session.get(url, headers=self._client.headers, timeout=timeout_val)
             response.raise_for_status()
         except RequestsError as e:
             raise IOError(f"Failed to generate response: {e}")
+        finally:
+            if proxies is not None:
+                self._client.session.proxies = original_proxies
 
         # Process the response text
         response_text = response.text
@@ -235,8 +248,6 @@ class AI4Chat(OpenAICompatibleProvider):
 
     def __init__(
         self,
-        timeout: int = 30,
-        proxies: dict = {},
         system_prompt: str = "You are a helpful and informative AI assistant.",
         country: str = "Asia",
         user_id: str = "usersmjb2oaz7y"
@@ -245,14 +256,11 @@
         Initialize the AI4Chat client.
 
         Args:
-            timeout: Request timeout in seconds
-            proxies: Optional proxy configuration
             system_prompt: System prompt to guide the AI's behavior
             country: Country parameter for API
             user_id: User ID for API
         """
-        self.timeout = timeout
-        self.proxies = proxies
+        self.timeout = 30
         self.system_prompt = system_prompt
         self.country = country
         self.user_id = user_id
@@ -261,7 +269,9 @@
         self.api_endpoint = "https://yw85opafq6.execute-api.us-east-1.amazonaws.com/default/boss_mode_15aug"
 
         # Initialize session
-        self.session = Session(timeout=timeout, proxies=proxies)
+        self.session = Session()
+        self.session.proxies = {}
+        # self.session.timeout = self.timeout # Timeout is per-request for curl_cffi
 
         # Set headers
         self.headers = {
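The same save/override/restore dance around session.proxies recurs in every provider this release touches: stash the session's proxy mapping, swap in the per-request one, and put the original back in a finally block. A hypothetical helper (not part of webscout, shown only to make those semantics explicit) that captures the pattern as a context manager:

    from contextlib import contextmanager

    @contextmanager
    def temporary_proxies(session, proxies):
        """Temporarily override a session's proxy mapping, restoring it on exit."""
        original = session.proxies
        session.proxies = proxies if proxies is not None else {}
        try:
            yield session
        finally:
            # mirrors the finally blocks added throughout this release
            session.proxies = original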
webscout/Provider/OPENAI/api.py

@@ -16,6 +16,8 @@ import sys
 import time
 import uuid
 import inspect
+import re
+import codecs
 from typing import List, Dict, Optional, Union, Any, Generator, Callable
 import types
 
@@ -28,6 +30,18 @@ from fastapi.routing import APIRoute
 from fastapi.exceptions import RequestValidationError
 from fastapi.security import APIKeyHeader
 from starlette.exceptions import HTTPException as StarletteHTTPException
+
+def clean_text(text):
+    """Clean text by removing null bytes and control characters except newlines and tabs."""
+    if not isinstance(text, str):
+        return text
+
+    # Remove null bytes
+    text = text.replace('\x00', '')
+
+    # Keep newlines, tabs, and other printable characters, remove other control chars
+    # This regex matches control characters except \n, \r, \t
+    return re.sub(r'[\x01-\x08\x0b\x0c\x0e-\x1f\x7f]', '', text)
 from starlette.status import (
     HTTP_422_UNPROCESSABLE_ENTITY,
     HTTP_404_NOT_FOUND,
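clean_text drops NUL plus the remaining C0 and DEL control characters while keeping \n, \r, and \t, so streamed content stays valid inside JSON frames. A quick check of the behavior, assuming the function as defined above:

    print(clean_text("Hi\x00 there\x07!\nkept\tkept"))  # -> "Hi there!" then a real newline; tab preserved
    print(clean_text(42))  # non-strings pass through unchanged -> 42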
@@ -153,7 +167,7 @@ class ChatCompletionRequest(BaseModel):
         extra = "ignore" # Ignore extra fields that aren't in the model
         schema_extra = {
             "example": {
-                "model": "ChatGPT/gpt-4",
+                "model": "ChatGPT/gpt-4o",
                 "messages": [
                     {"role": "system", "content": "You are a helpful assistant."},
                     {"role": "user", "content": "Hello, how are you?"}
@@ -634,7 +648,16 @@ def resolve_provider_and_model(model_identifier: str) -> tuple[Any, str]:
 
     # Validate model availability
     if hasattr(provider_class, "AVAILABLE_MODELS") and model_name is not None:
-        available = getattr(provider_class, "AVAILABLE_MODELS", [])
+        available = getattr(provider_class, "AVAILABLE_MODELS", None)
+        # If it's a property, get from instance
+        if isinstance(available, property):
+            try:
+                available = getattr(provider_class(), "AVAILABLE_MODELS", [])
+            except Exception:
+                available = []
+        # If still not iterable, fallback to empty list
+        if not isinstance(available, (list, tuple, set)):
+            available = list(available) if hasattr(available, "__iter__") and not isinstance(available, str) else []
         if available and model_name not in available:
             raise APIError(
                 f"Model '{model_name}' not supported by provider '{provider_class.__name__}'. Available models: {available}",
@@ -722,34 +745,69 @@ async def handle_streaming_response(provider: Any, params: Dict[str, Any], request_id: str):
                            chunk_data = chunk
                        else: # Fallback for unknown chunk types
                            chunk_data = chunk
-                        yield f"data: {json.dumps(chunk_data)}\n\n"
+
+                        # Clean text content in the chunk to remove control characters
+                        if isinstance(chunk_data, dict) and 'choices' in chunk_data:
+                            for choice in chunk_data.get('choices', []):
+                                if isinstance(choice, dict):
+                                    # Handle delta for streaming
+                                    if 'delta' in choice and isinstance(choice['delta'], dict) and 'content' in choice['delta']:
+                                        choice['delta']['content'] = clean_text(choice['delta']['content'])
+                                    # Handle message for non-streaming
+                                    elif 'message' in choice and isinstance(choice['message'], dict) and 'content' in choice['message']:
+                                        choice['message']['content'] = clean_text(choice['message']['content'])
+
+                        yield f"data: {json.dumps(chunk_data, ensure_ascii=False)}\n\n"
                except TypeError as te:
                    logger.error(f"Error iterating over completion_stream: {te}")
                    # Fall back to treating as non-generator response
                    if hasattr(completion_stream, 'model_dump'):
-                        yield f"data: {json.dumps(completion_stream.model_dump(exclude_none=True))}\n\n"
+                        response_data = completion_stream.model_dump(exclude_none=True)
                    elif hasattr(completion_stream, 'dict'):
-                        yield f"data: {json.dumps(completion_stream.dict(exclude_none=True))}\n\n"
+                        response_data = completion_stream.dict(exclude_none=True)
                    else:
-                        yield f"data: {json.dumps(completion_stream)}\n\n"
+                        response_data = completion_stream
+
+                    # Clean text content in the response
+                    if isinstance(response_data, dict) and 'choices' in response_data:
+                        for choice in response_data.get('choices', []):
+                            if isinstance(choice, dict):
+                                if 'delta' in choice and isinstance(choice['delta'], dict) and 'content' in choice['delta']:
+                                    choice['delta']['content'] = clean_text(choice['delta']['content'])
+                                elif 'message' in choice and isinstance(choice['message'], dict) and 'content' in choice['message']:
+                                    choice['message']['content'] = clean_text(choice['message']['content'])
+
+                    yield f"data: {json.dumps(response_data, ensure_ascii=False)}\n\n"
            else: # Non-generator response
                if hasattr(completion_stream, 'model_dump'):
-                    yield f"data: {json.dumps(completion_stream.model_dump(exclude_none=True))}\n\n"
+                    response_data = completion_stream.model_dump(exclude_none=True)
                elif hasattr(completion_stream, 'dict'):
-                    yield f"data: {json.dumps(completion_stream.dict(exclude_none=True))}\n\n"
+                    response_data = completion_stream.dict(exclude_none=True)
                else:
-                    yield f"data: {json.dumps(completion_stream)}\n\n"
+                    response_data = completion_stream
+
+                # Clean text content in the response
+                if isinstance(response_data, dict) and 'choices' in response_data:
+                    for choice in response_data.get('choices', []):
+                        if isinstance(choice, dict):
+                            if 'delta' in choice and isinstance(choice['delta'], dict) and 'content' in choice['delta']:
+                                choice['delta']['content'] = clean_text(choice['delta']['content'])
+                            elif 'message' in choice and isinstance(choice['message'], dict) and 'content' in choice['message']:
+                                choice['message']['content'] = clean_text(choice['message']['content'])
+
+                yield f"data: {json.dumps(response_data, ensure_ascii=False)}\n\n"
 
        except Exception as e:
            logger.error(f"Error in streaming response for request {request_id}: {e}")
+            error_message = clean_text(str(e))
            error_data = {
                "error": {
-                    "message": str(e),
+                    "message": error_message,
                    "type": "server_error",
                    "code": "streaming_error"
                }
            }
-            yield f"data: {json.dumps(error_data)}\n\n"
+            yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
        finally:
            yield "data: [DONE]\n\n"
    return StreamingResponse(streaming(), media_type="text/event-stream")
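Alongside the control-character scrubbing, switching to json.dumps(..., ensure_ascii=False) emits non-ASCII model output as UTF-8 instead of \uXXXX escapes. A before/after with an illustrative payload:

    import json

    payload = {"choices": [{"delta": {"content": "café ☕"}}]}
    print(json.dumps(payload))                      # {"choices": [{"delta": {"content": "caf\u00e9 \u2615"}}]}
    print(json.dumps(payload, ensure_ascii=False))  # {"choices": [{"delta": {"content": "café ☕"}}]}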
@@ -789,6 +847,13 @@ async def handle_non_streaming_response(provider: Any, params: Dict[str, Any], request_id: str):
                HTTP_500_INTERNAL_SERVER_ERROR,
                "provider_error"
            )
+
+        # Clean text content in the response to remove control characters
+        if isinstance(response_data, dict) and 'choices' in response_data:
+            for choice in response_data.get('choices', []):
+                if isinstance(choice, dict) and 'message' in choice:
+                    if isinstance(choice['message'], dict) and 'content' in choice['message']:
+                        choice['message']['content'] = clean_text(choice['message']['content'])
 
        elapsed = time.time() - start_time
        logger.info(f"Completed non-streaming request {request_id} in {elapsed:.2f}s")
@@ -797,8 +862,9 @@
 
    except Exception as e:
        logger.error(f"Error in non-streaming response for request {request_id}: {e}")
+        error_message = clean_text(str(e))
        raise APIError(
-            f"Provider error: {str(e)}",
+            f"Provider error: {error_message}",
            HTTP_500_INTERNAL_SERVER_ERROR,
            "provider_error"
        )
webscout/Provider/OPENAI/base.py

@@ -78,6 +78,8 @@ class BaseCompletions(ABC):
         top_p: Optional[float] = None,
         tools: Optional[List[Union[Tool, Dict[str, Any]]]] = None, # Support for tool definitions
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None, # Support for tool_choice parameter
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
webscout/Provider/OPENAI/c4ai.py

@@ -49,6 +49,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -88,14 +90,21 @@
 
         # Pass the formatted conversation prompt
         if stream:
-            return self._create_stream(request_id, created_time, model, conversation_id, conversation_prompt, system_prompt)
+            return self._create_stream(request_id, created_time, model, conversation_id, conversation_prompt, system_prompt, timeout=timeout, proxies=proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, conversation_id, conversation_prompt, system_prompt)
+            return self._create_non_stream(request_id, created_time, model, conversation_id, conversation_prompt, system_prompt, timeout=timeout, proxies=proxies)
 
     def _create_stream(
-        self, request_id: str, created_time: int, model: str, conversation_id: str, prompt: str, system_prompt: str
+        self, request_id: str, created_time: int, model: str, conversation_id: str, prompt: str, system_prompt: str,
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
         try:
+            timeout_val = timeout if timeout is not None else self._client.timeout
             message_id = self._client._conversation_data[model]["messageId"]
             url = f"{self._client.url}/api/chat/message"
             payload = {
@@ -117,7 +126,8 @@
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout_val,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()
 
@@ -160,11 +170,20 @@
         except Exception as e:
             print(f"Error during C4AI stream request: {e}")
             raise IOError(f"C4AI request failed: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies
 
     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, conversation_id: str, prompt: str, system_prompt: str
+        self, request_id: str, created_time: int, model: str, conversation_id: str, prompt: str, system_prompt: str,
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> ChatCompletion:
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
         try:
+            timeout_val = timeout if timeout is not None else self._client.timeout
             message_id = self._client._conversation_data[model]["messageId"]
             url = f"{self._client.url}/api/chat/message"
             payload = {
@@ -185,7 +204,8 @@
                 url,
                 headers=self._client.headers,
                 json=payload,
-                timeout=self._client.timeout
+                timeout=timeout_val,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()
 
@@ -213,6 +233,8 @@
         except Exception as e:
             print(f"Error during C4AI non-stream request: {e}")
             raise IOError(f"C4AI request failed: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies
 
 class Chat(BaseChat):
     def __init__(self, client: 'C4AI'):
@@ -242,19 +264,18 @@ class C4AI(OpenAICompatibleProvider):
 
     def __init__(
         self,
-        timeout: Optional[int] = None,
         browser: str = "chrome"
     ):
         """
         Initialize the C4AI client.
 
         Args:
-            timeout: Request timeout in seconds.
             browser: Browser name for LitAgent to generate User-Agent.
         """
-        self.timeout = timeout
+        self.timeout = 30
         self.url = "https://cohereforai-c4ai-command.hf.space"
         self.session = requests.Session()
+        self.session.proxies = {}
         self.max_tokens_to_sample = 2000
 
         agent = LitAgent()
@@ -370,4 +391,4 @@ class C4AI(OpenAICompatibleProvider):
         class _ModelList:
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
-        return _ModelList()
+        return _ModelList()
webscout/Provider/OPENAI/chatgpt.py

@@ -9,8 +9,8 @@ from datetime import datetime, timedelta
 from typing import List, Dict, Optional, Union, Generator, Any
 
 # Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage, count_tokens
 )
@@ -21,9 +21,21 @@ RED = "\033[91m"
 RESET = "\033[0m"
 
 class ChatGPTReversed:
+    AVAILABLE_MODELS = [
+        "auto",
+        "gpt-4o-mini",
+        "gpt-4o",
+        "o4-mini",
+        "gpt-4-1",
+        "gpt-4-1-mini",
+        "o3",
+        "o4-mini-high"
+
+
+
+    ]
     csrf_token = None
     initialized = False
-    AVAILABLE_MODELS = ["auto", "gpt-4o-mini", "gpt-4o", "o4-mini"]
 
     def __init__(self, model="auto"):
         if ChatGPTReversed.initialized:
@@ -332,6 +344,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -362,6 +376,8 @@
                 max_tokens=max_tokens,
                 temperature=temperature,
                 top_p=top_p,
+                timeout=timeout,
+                proxies=proxies,
                 **kwargs
             )
 
@@ -372,6 +388,8 @@
                 max_tokens=max_tokens,
                 temperature=temperature,
                 top_p=top_p,
+                timeout=timeout,
+                proxies=proxies,
                 **kwargs
             )
 
@@ -383,6 +401,8 @@
         max_tokens: Optional[int] = None,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Implementation for streaming chat completions."""
@@ -448,6 +468,8 @@
         max_tokens: Optional[int] = None,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> ChatCompletion:
         """Implementation for non-streaming chat completions."""
@@ -523,34 +545,31 @@ class ChatGPT(OpenAICompatibleProvider):
         print(response.choices[0].message.content)
     """
 
-    AVAILABLE_MODELS = [
-        "auto",
-        "gpt-4o-mini",
-        "gpt-4o",
-        "o4-mini"
-    ]
-
     def __init__(
-        self,
-        timeout: int = 60,
-        proxies: dict = {}
+        self
     ):
         """
         Initialize the ChatGPT client.
-
-        Args:
-            timeout: Request timeout in seconds
-            proxies: Optional proxy configuration
         """
-        self.timeout = timeout
-        self.proxies = proxies
-
         # Initialize chat interface
         self.chat = Chat(self)
 
+    @property
+    def AVAILABLE_MODELS(self):
+        return ChatGPTReversed.AVAILABLE_MODELS
+
     @property
     def models(self):
         class _ModelList:
             def list(inner_self):
-                return type(self).AVAILABLE_MODELS
-        return _ModelList()
+                return ChatGPTReversed.AVAILABLE_MODELS
+        return _ModelList()
+
+if __name__ == "__main__":
+    # Example usage
+    client = ChatGPT()
+    response = client.chat.completions.create(
+        model="o4-mini-high",
+        messages=[{"role": "user", "content": "How manr r in strawberry"}]
+    )
+    print(response.choices[0].message.content)