webscout 8.2.9__py3-none-any.whl → 8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (63)
  1. webscout/AIauto.py +2 -2
  2. webscout/Provider/Blackboxai.py +2 -0
  3. webscout/Provider/ChatSandbox.py +2 -1
  4. webscout/Provider/Deepinfra.py +1 -1
  5. webscout/Provider/HeckAI.py +1 -1
  6. webscout/Provider/LambdaChat.py +1 -0
  7. webscout/Provider/MCPCore.py +7 -3
  8. webscout/Provider/OPENAI/BLACKBOXAI.py +1017 -766
  9. webscout/Provider/OPENAI/Cloudflare.py +31 -14
  10. webscout/Provider/OPENAI/FalconH1.py +457 -0
  11. webscout/Provider/OPENAI/FreeGemini.py +29 -13
  12. webscout/Provider/OPENAI/NEMOTRON.py +26 -14
  13. webscout/Provider/OPENAI/PI.py +427 -0
  14. webscout/Provider/OPENAI/Qwen3.py +303 -282
  15. webscout/Provider/OPENAI/TwoAI.py +29 -12
  16. webscout/Provider/OPENAI/__init__.py +3 -1
  17. webscout/Provider/OPENAI/ai4chat.py +33 -23
  18. webscout/Provider/OPENAI/api.py +78 -12
  19. webscout/Provider/OPENAI/base.py +2 -0
  20. webscout/Provider/OPENAI/c4ai.py +31 -10
  21. webscout/Provider/OPENAI/chatgpt.py +41 -22
  22. webscout/Provider/OPENAI/chatgptclone.py +32 -13
  23. webscout/Provider/OPENAI/chatsandbox.py +7 -3
  24. webscout/Provider/OPENAI/copilot.py +26 -10
  25. webscout/Provider/OPENAI/deepinfra.py +327 -321
  26. webscout/Provider/OPENAI/e2b.py +77 -99
  27. webscout/Provider/OPENAI/exaai.py +13 -10
  28. webscout/Provider/OPENAI/exachat.py +10 -6
  29. webscout/Provider/OPENAI/flowith.py +7 -3
  30. webscout/Provider/OPENAI/freeaichat.py +10 -6
  31. webscout/Provider/OPENAI/glider.py +10 -6
  32. webscout/Provider/OPENAI/heckai.py +11 -8
  33. webscout/Provider/OPENAI/llmchatco.py +9 -7
  34. webscout/Provider/OPENAI/mcpcore.py +10 -7
  35. webscout/Provider/OPENAI/multichat.py +3 -1
  36. webscout/Provider/OPENAI/netwrck.py +10 -6
  37. webscout/Provider/OPENAI/oivscode.py +12 -9
  38. webscout/Provider/OPENAI/opkfc.py +14 -3
  39. webscout/Provider/OPENAI/scirachat.py +14 -8
  40. webscout/Provider/OPENAI/sonus.py +10 -6
  41. webscout/Provider/OPENAI/standardinput.py +18 -9
  42. webscout/Provider/OPENAI/textpollinations.py +14 -7
  43. webscout/Provider/OPENAI/toolbaz.py +16 -10
  44. webscout/Provider/OPENAI/typefully.py +14 -7
  45. webscout/Provider/OPENAI/typegpt.py +10 -6
  46. webscout/Provider/OPENAI/uncovrAI.py +22 -8
  47. webscout/Provider/OPENAI/venice.py +10 -6
  48. webscout/Provider/OPENAI/writecream.py +166 -163
  49. webscout/Provider/OPENAI/x0gpt.py +367 -365
  50. webscout/Provider/OPENAI/yep.py +384 -382
  51. webscout/Provider/PI.py +2 -1
  52. webscout/Provider/__init__.py +0 -2
  53. webscout/Provider/granite.py +41 -6
  54. webscout/Provider/oivscode.py +37 -37
  55. webscout/Provider/scnet.py +1 -0
  56. webscout/version.py +1 -1
  57. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/METADATA +2 -1
  58. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/RECORD +62 -61
  59. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  60. webscout/Provider/ChatGPTGratis.py +0 -194
  61. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/entry_points.txt +0 -0
  62. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  63. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
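The recurring change across the provider modules below is the same: chat.completions.create() gains per-request timeout and proxies parameters, which are threaded down to the underlying HTTP call and fall back to the client-wide defaults when omitted. A minimal sketch of the pattern, assuming a requests-style client object (the class shape here is illustrative, not webscout's actual code):

from typing import Any, Dict, Optional

class Completions:
    def __init__(self, client: Any) -> None:
        self._client = client

    def create(
        self,
        *,
        stream: bool = False,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ):
        payload = dict(kwargs)  # model, messages, etc.
        return self._send(payload, stream=stream, timeout=timeout, proxies=proxies)

    def _send(self, payload, stream, timeout=None, proxies=None):
        return self._client.session.post(
            self._client.api_endpoint,
            json=payload,
            stream=stream,
            # Per-request override wins; otherwise the client-wide default applies.
            timeout=timeout or self._client.timeout,
            # getattr() keeps clients that predate the proxies attribute working.
            proxies=proxies or getattr(self._client, "proxies", None),
        )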
webscout/Provider/OPENAI/heckai.py (+11 -8)

@@ -34,6 +34,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,  # Not used by HeckAI but kept for compatibility
         top_p: Optional[float] = None,  # Not used by HeckAI but kept for compatibility
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any  # Not used by HeckAI but kept for compatibility
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -62,12 +64,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
@@ -75,7 +77,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()

@@ -128,7 +131,7 @@ class Completions(BaseCompletions):
             raise IOError(f"HeckAI request failed: {e}") from e

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
             answer_lines = []
@@ -138,7 +141,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()
             for line in response.iter_lines(decode_unicode=True):
@@ -210,14 +214,13 @@ class HeckAI(OpenAICompatibleProvider):
     """

     AVAILABLE_MODELS = [
-        "google/gemini-2.0-flash-001",
+        "google/gemini-2.5-flash-preview",
         "deepseek/deepseek-chat",
         "deepseek/deepseek-r1",
         "openai/gpt-4o-mini",
         "openai/gpt-4.1-mini",
         "x-ai/grok-3-mini-beta",
         "meta-llama/llama-4-scout"
-
     ]

     def __init__(
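Assuming HeckAI keeps its OpenAI-compatible surface and is exported from webscout.Provider.OPENAI (the import path and constructor defaults are assumptions, not shown in this diff), a call exercising the new per-request overrides might look like:

from webscout.Provider.OPENAI import HeckAI  # assumed export

client = HeckAI()
response = client.chat.completions.create(
    model="google/gemini-2.5-flash-preview",     # replaces gemini-2.0-flash-001 in 8.3
    messages=[{"role": "user", "content": "Say hello"}],
    timeout=30,                                  # overrides client.timeout for this call only
    proxies={"https": "http://127.0.0.1:8080"},  # hypothetical local proxy
)
print(response.choices[0].message.content)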
webscout/Provider/OPENAI/llmchatco.py (+9 -7)

@@ -37,6 +37,8 @@ class Completions(BaseCompletions):
         top_p: Optional[float] = None,  # Note: LLMChatCo doesn't seem to use top_p directly in payload
         web_search: bool = False,  # LLMChatCo specific parameter
         system_prompt: Optional[str] = "You are a helpful assistant.",  # Default system prompt if not provided
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -88,12 +90,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, actual_model, payload)
+            return self._create_stream(request_id, created_time, actual_model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, actual_model, payload)
+            return self._create_non_stream(request_id, created_time, actual_model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
@@ -101,7 +103,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )

             if not response.ok:
@@ -197,14 +200,14 @@ class Completions(BaseCompletions):


     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         # Non-streaming requires accumulating stream chunks
         full_response_content = ""
         finish_reason = "stop"  # Assume stop unless error occurs

         try:
-            stream_generator = self._create_stream(request_id, created_time, model, payload)
+            stream_generator = self._create_stream(request_id, created_time, model, payload, timeout, proxies)
             for chunk in stream_generator:
                 if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
                     full_response_content += chunk.choices[0].delta.content
@@ -332,4 +335,3 @@ class LLMChatCo(OpenAICompatibleProvider):
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
         return _ModelList()
-
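LLMChatCo's _create_non_stream builds its final completion by draining its own _create_stream generator, so the new timeout and proxies only need to be forwarded once. A standalone sketch of that accumulation step, with an OpenAI-style delta shape assumed for the chunks:

from typing import Iterable

def accumulate(chunks: Iterable[dict]) -> str:
    # Concatenate delta.content fragments, skipping empty keep-alive chunks.
    parts = []
    for chunk in chunks:
        delta = chunk.get("choices", [{}])[0].get("delta", {})
        if delta.get("content"):
            parts.append(delta["content"])
    return "".join(parts)

fake_stream = ({"choices": [{"delta": {"content": p}}]} for p in ("Hel", "lo", "!"))
print(accumulate(fake_stream))  # Hello!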
webscout/Provider/OPENAI/mcpcore.py (+10 -7)

@@ -43,6 +43,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -75,12 +77,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream_from_stream(request_id, created_time, model, payload)
+            return self._create_non_stream_from_stream(request_id, created_time, model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Handles the streaming response from MCPCore."""
         final_usage_data = None  # To store usage if received
@@ -90,7 +92,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
                 impersonate="chrome110"  # Impersonation often helps
             )

@@ -193,7 +196,7 @@ class Completions(BaseCompletions):
             raise IOError(f"MCPCore stream processing failed: {e}{error_details}") from e

     def _create_non_stream_from_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         """Handles the non-streaming response by making a single POST request (like deepinfra)."""
         try:
@@ -205,7 +208,8 @@ class Completions(BaseCompletions):
                 self._client.api_endpoint,
                 headers=self._client.headers,
                 json=payload,
-                timeout=self._client.timeout,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
                 impersonate="chrome110"
             )
             if not response.ok:
@@ -386,4 +390,3 @@ class MCPCore(OpenAICompatibleProvider):
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
         return _ModelList()
-
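The impersonate="chrome110" argument indicates MCPCore's session is curl_cffi's requests-compatible client, which accepts timeout, proxies, and impersonate on the same call. A sketch of how the three combine (endpoint and payload are placeholders):

from curl_cffi import requests as curl_requests

response = curl_requests.post(
    "https://example.com/api/chat",                  # placeholder endpoint
    json={"model": "some-model", "messages": []},
    timeout=30,
    proxies={"https": "http://127.0.0.1:8080"},      # hypothetical proxy
    impersonate="chrome110",                         # present a Chrome 110 TLS/HTTP fingerprint
)
print(response.status_code)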
webscout/Provider/OPENAI/multichat.py (+3 -1)

@@ -94,6 +94,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -132,7 +134,7 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         # Make the API request
-        response_text = self._client._make_api_request(user_message)
+        response_text = self._client._make_api_request(user_message, timeout=timeout, proxies=proxies)

         # If streaming is requested, simulate streaming with the full response
         if stream:
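Here the provider fetches the whole reply in one request and, when stream=True, simulates streaming by slicing it into delta chunks. A sketch of that simulation, with an OpenAI-style chunk shape assumed:

from typing import Generator

def simulate_stream(text: str, size: int = 8) -> Generator[dict, None, None]:
    # Slice one complete response into synthetic streaming chunks.
    for i in range(0, len(text), size):
        yield {"choices": [{"delta": {"content": text[i:i + size]}}]}
    yield {"choices": [{"delta": {}, "finish_reason": "stop"}]}

for chunk in simulate_stream("This arrived in a single API response."):
    print(chunk)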
webscout/Provider/OPENAI/netwrck.py (+10 -6)

@@ -37,6 +37,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -61,19 +63,20 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
                 "https://netwrck.com/api/chatpred_or",
                 json=payload,
                 headers=self._client.headers,
-                timeout=self._client.timeout,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
                 stream=True
             )
             response.raise_for_status()
@@ -127,14 +130,15 @@ class Completions(BaseCompletions):
             raise IOError(f"Netwrck request failed: {e}") from e

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
             response = self._client.session.post(
                 "https://netwrck.com/api/chatpred_or",
                 json=payload,
                 headers=self._client.headers,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()

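One caveat worth noting about the fallback idiom used throughout these diffs: timeout or self._client.timeout tests truthiness, so an explicit timeout=0 (or an empty proxies dict) silently falls back to the client default, whereas an is-None check would honor it:

client_timeout = 60

def effective(timeout=None):
    return timeout or client_timeout            # idiom used in these diffs

def effective_strict(timeout=None):
    return client_timeout if timeout is None else timeout

print(effective(0))         # 60: zero is falsy, so the default wins
print(effective_strict(0))  # 0: an explicit zero is respected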
webscout/Provider/OPENAI/oivscode.py (+12 -9)

@@ -29,6 +29,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -52,11 +54,11 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

-    def _post_with_retry(self, payload, stream=False):
+    def _post_with_retry(self, payload, stream=False, timeout=None, proxies=None):
         """
         Try all endpoints until one succeeds or all fail.
         """
@@ -68,7 +70,8 @@ class Completions(BaseCompletions):
                     headers=self._client.headers,
                     json=payload,
                     stream=stream,
-                    timeout=self._client.timeout
+                    timeout=timeout or self._client.timeout,
+                    proxies=proxies or getattr(self._client, "proxies", None)
                 )
                 response.raise_for_status()
                 self._client.base_url = endpoint  # Update to working endpoint
@@ -79,10 +82,10 @@ class Completions(BaseCompletions):
         raise IOError(f"All oivscode endpoints failed: {last_exception}") from last_exception

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
-            response = self._post_with_retry(payload, stream=True)
+            response = self._post_with_retry(payload, stream=True, timeout=timeout, proxies=proxies)
             prompt_tokens = 0
             completion_tokens = 0
             total_tokens = 0
@@ -160,10 +163,10 @@ class Completions(BaseCompletions):
             raise

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
-            response = self._post_with_retry(payload, stream=False)
+            response = self._post_with_retry(payload, stream=False, timeout=timeout, proxies=proxies)
             data = response.json()

             choices_data = data.get('choices', [])
@@ -284,4 +287,4 @@ if __name__ == "__main__":
         max_tokens=50,
         stream=False
     )
-    print(response)
+    print(response)
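oivscode's _post_with_retry walks a list of candidate endpoints, pins the first one that answers, and raises only after all fail. A condensed standalone sketch of that failover loop (the endpoint list and error type are assumptions):

import requests

def post_with_retry(endpoints, payload, timeout=30, proxies=None):
    """Try each endpoint in order; return from the first that succeeds."""
    last_exception = None
    for endpoint in endpoints:
        try:
            response = requests.post(endpoint, json=payload,
                                     timeout=timeout, proxies=proxies)
            response.raise_for_status()
            return endpoint, response  # caller can pin this endpoint for next time
        except requests.RequestException as exc:
            last_exception = exc
    raise IOError(f"All endpoints failed: {last_exception}") from last_exception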
webscout/Provider/OPENAI/opkfc.py (+14 -3)

@@ -30,6 +30,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -56,6 +58,8 @@ class Completions(BaseCompletions):
                 max_tokens=max_tokens,
                 temperature=temperature,
                 top_p=top_p,
+                timeout=timeout,
+                proxies=proxies,
                 **kwargs
             )

@@ -66,6 +70,8 @@ class Completions(BaseCompletions):
                 max_tokens=max_tokens,
                 temperature=temperature,
                 top_p=top_p,
+                timeout=timeout,
+                proxies=proxies,
                 **kwargs
             )

@@ -77,6 +83,8 @@ class Completions(BaseCompletions):
         max_tokens: Optional[int] = None,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Implementation for streaming chat completions."""
@@ -144,7 +152,8 @@ class Completions(BaseCompletions):
                 headers=headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()

@@ -254,6 +263,8 @@ class Completions(BaseCompletions):
         max_tokens: Optional[int] = None,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> ChatCompletion:
         """Implementation for non-streaming chat completions."""
@@ -322,7 +333,8 @@ class Completions(BaseCompletions):
                 headers=headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()

@@ -493,4 +505,3 @@ class OPKFC(OpenAICompatibleProvider):
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
         return _ModelList()
-
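OPKFC differs from the other providers in that create() forwards the new parameters to its internal implementations by keyword rather than positionally, which keeps the call sites readable as the signatures grow. A toy sketch of that dispatch shape (the function names here are illustrative):

from typing import Any, Dict, Optional

def _create_streaming(**options: Any):
    return ("streaming", options)

def _create_non_streaming(**options: Any):
    return ("non-streaming", options)

def create(*, stream: bool = False, timeout: Optional[int] = None,
           proxies: Optional[Dict[str, str]] = None, **kwargs: Any):
    impl = _create_streaming if stream else _create_non_streaming
    return impl(timeout=timeout, proxies=proxies, **kwargs)

print(create(stream=True, timeout=15, model="demo"))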
webscout/Provider/OPENAI/scirachat.py (+14 -8)

@@ -45,6 +45,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -75,19 +77,20 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
                 self._client.api_endpoint,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )

             # Handle non-200 responses
@@ -100,7 +103,8 @@ class Completions(BaseCompletions):
                 self._client.api_endpoint,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             if not response.ok:
                 raise IOError(
@@ -225,13 +229,14 @@ class Completions(BaseCompletions):
             raise IOError(f"SciraChat request failed: {e}") from e

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
             response = self._client.session.post(
                 self._client.api_endpoint,
                 json=payload,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )

             # Handle non-200 responses
@@ -243,7 +248,8 @@ class Completions(BaseCompletions):
             response = self._client.session.post(
                 self._client.api_endpoint,
                 json=payload,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             if not response.ok:
                 raise IOError(
webscout/Provider/OPENAI/sonus.py (+10 -6)

@@ -35,6 +35,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,  # Not used by SonusAI but kept for compatibility
         top_p: Optional[float] = None,  # Not used by SonusAI but kept for compatibility
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any  # Not used by SonusAI but kept for compatibility
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -61,12 +63,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, files)
+            return self._create_stream(request_id, created_time, model, files, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, files)
+            return self._create_non_stream(request_id, created_time, model, files, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, files: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, files: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = requests.post(
@@ -74,7 +76,8 @@ class Completions(BaseCompletions):
                 files=files,
                 headers=self._client.headers,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()

@@ -131,14 +134,15 @@ class Completions(BaseCompletions):
             raise IOError(f"SonusAI request failed: {e}") from e

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, files: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, files: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
             response = requests.post(
                 self._client.url,
                 files=files,
                 headers=self._client.headers,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()

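SonusAI is the one provider here posting multipart form data (files=...) through module-level requests.post rather than a shared session, so proxies must ride along on every call. With requests, a (None, value) tuple sends a plain form field; the field names below are illustrative:

import requests

form = {
    "message": (None, "Hello"),          # filename=None makes this a plain form field
    "model": (None, "sonus-default"),    # illustrative field names
}
response = requests.post(
    "https://example.com/chat",          # placeholder for self._client.url
    files=form,
    timeout=30,
    proxies={"https": "http://127.0.0.1:8080"},  # hypothetical proxy
)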
webscout/Provider/OPENAI/standardinput.py (+18 -9)

@@ -48,6 +48,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """Create a chat completion."""
@@ -85,16 +87,18 @@ class Completions(BaseCompletions):

         # Handle streaming vs non-streaming
         if stream:
-            return self._stream_request(request_id, created_time, model, payload)
+            return self._stream_request(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._non_stream_request(request_id, created_time, model, payload)
+            return self._non_stream_request(request_id, created_time, model, payload, timeout, proxies)

     def _non_stream_request(
         self,
         request_id: str,
         created_time: int,
         model: str,
-        payload: Dict[str, Any]
+        payload: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         """Handle non-streaming request."""
         try:
@@ -103,7 +107,8 @@ class Completions(BaseCompletions):
                 self._client.api_endpoint,
                 cookies=self._client.cookies,
                 json=payload,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )

             # Check for errors
@@ -121,7 +126,8 @@ class Completions(BaseCompletions):
                     self._client.api_endpoint,
                     cookies=self._client.cookies,
                     json=payload,
-                    timeout=self._client.timeout
+                    timeout=timeout or self._client.timeout,
+                    proxies=proxies or getattr(self._client, "proxies", None)
                 )
                 if not response.ok:
                     raise IOError(f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}")
@@ -189,7 +195,9 @@ class Completions(BaseCompletions):
         request_id: str,
         created_time: int,
         model: str,
-        payload: Dict[str, Any]
+        payload: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Handle streaming request."""
         try:
@@ -199,7 +207,8 @@ class Completions(BaseCompletions):
                 cookies=self._client.cookies,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )

             # Check for errors
@@ -218,7 +227,8 @@ class Completions(BaseCompletions):
                 cookies=self._client.cookies,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             if not response.ok:
                 raise IOError(f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}")
@@ -430,4 +440,3 @@ class StandardInput(OpenAICompatibleProvider):
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
         return _ModelList()
-
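StandardInput retries after an "identity refresh", re-sending the cookies-bearing request once the client's identity has been renewed. The refresh itself is not shown in this diff; the sketch below assumes a caller-supplied refresh callable and only reproduces the retry shape:

import requests

def post_with_identity_refresh(session, url, payload, refresh, timeout=30, proxies=None):
    response = session.post(url, json=payload, timeout=timeout, proxies=proxies)
    if not response.ok:
        refresh(session)  # assumed hook: re-acquire cookies / identity
        response = session.post(url, json=payload, timeout=timeout, proxies=proxies)
        if not response.ok:
            raise IOError(f"Failed after identity refresh: {response.status_code}")
    return response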
webscout/Provider/OPENAI/textpollinations.py (+14 -7)

@@ -34,6 +34,8 @@ class Completions(BaseCompletions):
         top_p: Optional[float] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -62,16 +64,18 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_streaming(request_id, created_time, model, payload)
+            return self._create_streaming(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_streaming(request_id, created_time, model, payload)
+            return self._create_non_streaming(request_id, created_time, model, payload, timeout, proxies)

     def _create_streaming(
         self,
         request_id: str,
         created_time: int,
         model: str,
-        payload: Dict[str, Any]
+        payload: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Implementation for streaming chat completions."""
         try:
@@ -82,7 +86,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )

             if not response.ok:
@@ -163,7 +168,9 @@ class Completions(BaseCompletions):
         request_id: str,
         created_time: int,
         model: str,
-        payload: Dict[str, Any]
+        payload: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         """Implementation for non-streaming chat completions."""
         try:
@@ -173,7 +180,8 @@ class Completions(BaseCompletions):
                 self._client.api_endpoint,
                 headers=self._client.headers,
                 json=payload,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )

             if not response.ok:
@@ -336,4 +344,3 @@ class TextPollinations(OpenAICompatibleProvider):
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
         return _ModelList()
-
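TextPollinations already exposes OpenAI-style tools and tool_choice in the same signature that now gains timeout and proxies, so all four can be combined in one call. A closing sketch, assuming the class is exported from webscout.Provider.OPENAI:

from webscout.Provider.OPENAI import TextPollinations  # assumed export

client = TextPollinations()
response = client.chat.completions.create(
    model=TextPollinations.AVAILABLE_MODELS[0],
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=[{
        "type": "function",
        "function": {
            "name": "get_weather",  # illustrative tool definition
            "parameters": {"type": "object",
                           "properties": {"city": {"type": "string"}}},
        },
    }],
    timeout=45,  # per-request override added in this release
)
print(response)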