webscout-8.2.9-py3-none-any.whl → webscout-8.3.1-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only.

Potentially problematic release.

Files changed (100)
  1. webscout/AIauto.py +6 -6
  2. webscout/AIbase.py +61 -1
  3. webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
  4. webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
  5. webscout/Extra/YTToolkit/ytapi/video.py +10 -10
  6. webscout/Extra/autocoder/autocoder_utiles.py +1 -1
  7. webscout/Litlogger/formats.py +9 -0
  8. webscout/Litlogger/handlers.py +18 -0
  9. webscout/Litlogger/logger.py +43 -1
  10. webscout/Provider/AISEARCH/scira_search.py +3 -2
  11. webscout/Provider/Blackboxai.py +2 -0
  12. webscout/Provider/ChatSandbox.py +2 -1
  13. webscout/Provider/Deepinfra.py +1 -1
  14. webscout/Provider/HeckAI.py +1 -1
  15. webscout/Provider/LambdaChat.py +8 -1
  16. webscout/Provider/MCPCore.py +7 -3
  17. webscout/Provider/OPENAI/BLACKBOXAI.py +396 -113
  18. webscout/Provider/OPENAI/Cloudflare.py +31 -14
  19. webscout/Provider/OPENAI/FalconH1.py +457 -0
  20. webscout/Provider/OPENAI/FreeGemini.py +29 -13
  21. webscout/Provider/OPENAI/NEMOTRON.py +26 -14
  22. webscout/Provider/OPENAI/PI.py +427 -0
  23. webscout/Provider/OPENAI/Qwen3.py +161 -140
  24. webscout/Provider/OPENAI/README.md +3 -0
  25. webscout/Provider/OPENAI/TogetherAI.py +355 -0
  26. webscout/Provider/OPENAI/TwoAI.py +29 -12
  27. webscout/Provider/OPENAI/__init__.py +4 -1
  28. webscout/Provider/OPENAI/ai4chat.py +33 -23
  29. webscout/Provider/OPENAI/api.py +375 -24
  30. webscout/Provider/OPENAI/autoproxy.py +39 -0
  31. webscout/Provider/OPENAI/base.py +91 -12
  32. webscout/Provider/OPENAI/c4ai.py +31 -10
  33. webscout/Provider/OPENAI/chatgpt.py +56 -24
  34. webscout/Provider/OPENAI/chatgptclone.py +46 -16
  35. webscout/Provider/OPENAI/chatsandbox.py +7 -3
  36. webscout/Provider/OPENAI/copilot.py +26 -10
  37. webscout/Provider/OPENAI/deepinfra.py +29 -12
  38. webscout/Provider/OPENAI/e2b.py +358 -158
  39. webscout/Provider/OPENAI/exaai.py +13 -10
  40. webscout/Provider/OPENAI/exachat.py +10 -6
  41. webscout/Provider/OPENAI/flowith.py +7 -3
  42. webscout/Provider/OPENAI/freeaichat.py +10 -6
  43. webscout/Provider/OPENAI/glider.py +10 -6
  44. webscout/Provider/OPENAI/heckai.py +11 -8
  45. webscout/Provider/OPENAI/llmchatco.py +9 -7
  46. webscout/Provider/OPENAI/mcpcore.py +10 -7
  47. webscout/Provider/OPENAI/multichat.py +3 -1
  48. webscout/Provider/OPENAI/netwrck.py +10 -6
  49. webscout/Provider/OPENAI/oivscode.py +12 -9
  50. webscout/Provider/OPENAI/opkfc.py +31 -8
  51. webscout/Provider/OPENAI/scirachat.py +17 -10
  52. webscout/Provider/OPENAI/sonus.py +10 -6
  53. webscout/Provider/OPENAI/standardinput.py +18 -9
  54. webscout/Provider/OPENAI/textpollinations.py +14 -7
  55. webscout/Provider/OPENAI/toolbaz.py +16 -11
  56. webscout/Provider/OPENAI/typefully.py +14 -7
  57. webscout/Provider/OPENAI/typegpt.py +10 -6
  58. webscout/Provider/OPENAI/uncovrAI.py +22 -8
  59. webscout/Provider/OPENAI/venice.py +10 -6
  60. webscout/Provider/OPENAI/writecream.py +13 -10
  61. webscout/Provider/OPENAI/x0gpt.py +11 -9
  62. webscout/Provider/OPENAI/yep.py +12 -10
  63. webscout/Provider/PI.py +2 -1
  64. webscout/Provider/STT/__init__.py +3 -0
  65. webscout/Provider/STT/base.py +281 -0
  66. webscout/Provider/STT/elevenlabs.py +265 -0
  67. webscout/Provider/TTI/__init__.py +3 -1
  68. webscout/Provider/TTI/aiarta.py +399 -365
  69. webscout/Provider/TTI/base.py +74 -2
  70. webscout/Provider/TTI/fastflux.py +63 -30
  71. webscout/Provider/TTI/gpt1image.py +149 -0
  72. webscout/Provider/TTI/imagen.py +196 -0
  73. webscout/Provider/TTI/magicstudio.py +60 -29
  74. webscout/Provider/TTI/piclumen.py +43 -32
  75. webscout/Provider/TTI/pixelmuse.py +232 -225
  76. webscout/Provider/TTI/pollinations.py +43 -32
  77. webscout/Provider/TTI/together.py +287 -0
  78. webscout/Provider/TTI/utils.py +2 -1
  79. webscout/Provider/TTS/README.md +1 -0
  80. webscout/Provider/TTS/__init__.py +2 -1
  81. webscout/Provider/TTS/freetts.py +140 -0
  82. webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
  83. webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
  84. webscout/Provider/__init__.py +3 -2
  85. webscout/Provider/granite.py +41 -6
  86. webscout/Provider/oivscode.py +37 -37
  87. webscout/Provider/scira_chat.py +3 -2
  88. webscout/Provider/scnet.py +1 -0
  89. webscout/Provider/toolbaz.py +0 -1
  90. webscout/litagent/Readme.md +12 -3
  91. webscout/litagent/agent.py +99 -62
  92. webscout/version.py +1 -1
  93. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/METADATA +2 -1
  94. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/RECORD +98 -87
  95. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/WHEEL +1 -1
  96. webscout/Provider/ChatGPTGratis.py +0 -194
  97. webscout/Provider/TTI/artbit.py +0 -0
  98. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/entry_points.txt +0 -0
  99. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/licenses/LICENSE.md +0 -0
  100. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/top_level.txt +0 -0
--- a/webscout/Provider/OPENAI/exaai.py
+++ b/webscout/Provider/OPENAI/exaai.py
@@ -38,6 +38,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -90,12 +92,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
@@ -103,7 +105,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )

             # Handle non-200 responses
@@ -217,7 +220,7 @@ class Completions(BaseCompletions):
             raise IOError(f"ExaAI request failed: {e}") from e

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
             # For non-streaming, we still use streaming internally to collect the full response
@@ -226,7 +229,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )

             # Handle non-200 responses
@@ -313,17 +317,16 @@ class ExaAI(OpenAICompatibleProvider):

     def __init__(
         self,
-        timeout: Optional[int] = None,
         browser: str = "chrome"
     ):
         """
         Initialize the ExaAI client.

         Args:
-            timeout: Request timeout in seconds (None for no timeout)
             browser: Browser to emulate in user agent
         """
-        self.timeout = timeout
+        self.timeout = 60  # Default timeout in seconds
+        self.proxies = None  # Default proxies
         self.api_endpoint = "https://o3minichat.exa.ai/api/chat"
         self.session = requests.Session()

@@ -414,4 +417,4 @@ class ExaAI(OpenAICompatibleProvider):
         class _ModelList:
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
-        return _ModelList()
+        return _ModelList()
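Taken together, these hunks move `timeout` and `proxies` from client construction to per-request arguments on `chat.completions.create(...)`, with client-level defaults (`timeout=60`, `proxies=None`) as the fallback. A minimal usage sketch, assuming the package's OpenAI-compatible client interface; the model id is illustrative, not taken from the diff:

from webscout.Provider.OPENAI import ExaAI

client = ExaAI(browser="chrome")  # timeout now defaults to 60s on the client
response = client.chat.completions.create(
    model="o3-mini",  # hypothetical model id, for illustration only
    messages=[{"role": "user", "content": "Hello!"}],
    timeout=30,  # per-request override of client.timeout
    proxies={"https": "http://127.0.0.1:8080"},  # per-request proxy mapping
)
print(response.choices[0].message.content)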
--- a/webscout/Provider/OPENAI/exachat.py
+++ b/webscout/Provider/OPENAI/exachat.py
@@ -100,6 +100,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -136,12 +138,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, provider, payload)
+            return self._create_stream(request_id, created_time, model, provider, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, provider, payload)
+            return self._create_non_stream(request_id, created_time, model, provider, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, provider: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, provider: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             endpoint = self._client._get_endpoint(provider)
@@ -150,7 +152,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()

@@ -203,7 +206,7 @@ class Completions(BaseCompletions):
             raise IOError(f"ExaChat request failed: {e}") from e

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, provider: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, provider: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
             endpoint = self._client._get_endpoint(provider)
@@ -211,7 +214,8 @@ class Completions(BaseCompletions):
                 endpoint,
                 headers=self._client.headers,
                 json=payload,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()

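Both ExaChat requests resolve the new options through the same fallback chain: the explicit per-call value wins, otherwise a client attribute is used when present. The pattern, isolated as a hypothetical helper:

def resolve_option(value, client, attr, default=None):
    # Mirrors the diff's `value or getattr(client, attr, default)` chain.
    # Caveat: `or` also falls back when value is falsy (0, {}, "").
    return value or getattr(client, attr, default)

class _Client:
    timeout = 60
    proxies = None

print(resolve_option(None, _Client, "timeout"))  # 60 -> client default wins
print(resolve_option(15, _Client, "timeout"))    # 15 -> explicit value wins
print(resolve_option(None, _Client, "proxies"))  # None -> no proxy configured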
--- a/webscout/Provider/OPENAI/flowith.py
+++ b/webscout/Provider/OPENAI/flowith.py
@@ -37,6 +37,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -76,7 +78,8 @@ class Completions(BaseCompletions):
                 url,
                 json=payload,
                 stream=True,
-                timeout=30
+                timeout=timeout or 30,
+                proxies=proxies
             )
             print(f"[DEBUG] Response status: {response.status_code}")
             response.raise_for_status()
@@ -105,7 +108,8 @@ class Completions(BaseCompletions):
             response = session.post(
                 url,
                 json=payload,
-                timeout=30
+                timeout=timeout or 30,
+                proxies=proxies
             )
             print(f"[DEBUG] Response status: {response.status_code}")
             response.raise_for_status()
@@ -159,4 +163,4 @@ class Flowith(OpenAICompatibleProvider):
         class _ModelList:
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
-        return _ModelList()
+        return _ModelList()
--- a/webscout/Provider/OPENAI/freeaichat.py
+++ b/webscout/Provider/OPENAI/freeaichat.py
@@ -32,6 +32,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -55,12 +57,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
@@ -68,7 +70,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )

             # Handle non-200 responses
@@ -169,14 +172,15 @@ class Completions(BaseCompletions):
             raise IOError(f"FreeAIChat request failed: {e}") from e

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
             response = self._client.session.post(
                 self._client.api_endpoint,
                 headers=self._client.headers,
                 json=payload,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )

             # Handle non-200 responses
--- a/webscout/Provider/OPENAI/glider.py
+++ b/webscout/Provider/OPENAI/glider.py
@@ -44,6 +44,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -73,12 +75,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
@@ -86,7 +88,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()

@@ -186,14 +189,15 @@ class Completions(BaseCompletions):
             raise

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
             response = self._client.session.post(
                 self._client.api_endpoint,
                 headers=self._client.headers,
                 json=payload,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()
             data = response.json()
--- a/webscout/Provider/OPENAI/heckai.py
+++ b/webscout/Provider/OPENAI/heckai.py
@@ -34,6 +34,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,  # Not used by HeckAI but kept for compatibility
         top_p: Optional[float] = None,  # Not used by HeckAI but kept for compatibility
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any  # Not used by HeckAI but kept for compatibility
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -62,12 +64,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
@@ -75,7 +77,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()

@@ -128,7 +131,7 @@ class Completions(BaseCompletions):
             raise IOError(f"HeckAI request failed: {e}") from e

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
             answer_lines = []
@@ -138,7 +141,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()
             for line in response.iter_lines(decode_unicode=True):
@@ -210,14 +214,13 @@ class HeckAI(OpenAICompatibleProvider):
     """

     AVAILABLE_MODELS = [
-        "google/gemini-2.0-flash-001",
+        "google/gemini-2.5-flash-preview",
         "deepseek/deepseek-chat",
        "deepseek/deepseek-r1",
         "openai/gpt-4o-mini",
         "openai/gpt-4.1-mini",
         "x-ai/grok-3-mini-beta",
         "meta-llama/llama-4-scout"
-
     ]

     def __init__(
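Besides the option plumbing, HeckAI's default Gemini model moves from google/gemini-2.0-flash-001 to google/gemini-2.5-flash-preview. A sketch of how a caller might enumerate the updated ids, assuming the `_ModelList` shim seen throughout these files is exposed as an OpenAI-style `models` property:

from webscout.Provider.OPENAI import HeckAI  # import path assumed

client = HeckAI()
for model_id in client.models.list():  # backed by the _ModelList shim
    print(model_id)  # now includes "google/gemini-2.5-flash-preview"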
--- a/webscout/Provider/OPENAI/llmchatco.py
+++ b/webscout/Provider/OPENAI/llmchatco.py
@@ -37,6 +37,8 @@ class Completions(BaseCompletions):
         top_p: Optional[float] = None,  # Note: LLMChatCo doesn't seem to use top_p directly in payload
         web_search: bool = False,  # LLMChatCo specific parameter
         system_prompt: Optional[str] = "You are a helpful assistant.",  # Default system prompt if not provided
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -88,12 +90,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, actual_model, payload)
+            return self._create_stream(request_id, created_time, actual_model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, actual_model, payload)
+            return self._create_non_stream(request_id, created_time, actual_model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
@@ -101,7 +103,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )

             if not response.ok:
@@ -197,14 +200,14 @@ class Completions(BaseCompletions):


     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         # Non-streaming requires accumulating stream chunks
         full_response_content = ""
         finish_reason = "stop"  # Assume stop unless error occurs

         try:
-            stream_generator = self._create_stream(request_id, created_time, model, payload)
+            stream_generator = self._create_stream(request_id, created_time, model, payload, timeout, proxies)
             for chunk in stream_generator:
                 if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
                     full_response_content += chunk.choices[0].delta.content
@@ -332,4 +335,3 @@ class LLMChatCo(OpenAICompatibleProvider):
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
         return _ModelList()
-
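LLMChatCo has no separate non-streaming path: `_create_non_stream` drains its own streaming generator (now with `timeout` and `proxies` forwarded) and concatenates the deltas. The accumulation step in isolation, with toy objects standing in for `ChatCompletionChunk`:

from types import SimpleNamespace

def collect_stream(chunks):
    # Accumulates delta.content across streamed chunks, as
    # _create_non_stream does before wrapping the final ChatCompletion.
    parts = []
    for chunk in chunks:
        if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
            parts.append(chunk.choices[0].delta.content)
    return "".join(parts)

# Toy chunks; a None delta.content (end-of-stream marker) is skipped.
mk = lambda text: SimpleNamespace(
    choices=[SimpleNamespace(delta=SimpleNamespace(content=text))])
print(collect_stream([mk("Hel"), mk("lo"), mk(None)]))  # -> Hello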
--- a/webscout/Provider/OPENAI/mcpcore.py
+++ b/webscout/Provider/OPENAI/mcpcore.py
@@ -43,6 +43,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -75,12 +77,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream_from_stream(request_id, created_time, model, payload)
+            return self._create_non_stream_from_stream(request_id, created_time, model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Handles the streaming response from MCPCore."""
         final_usage_data = None  # To store usage if received
@@ -90,7 +92,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
                 impersonate="chrome110"  # Impersonation often helps
             )

@@ -193,7 +196,7 @@ class Completions(BaseCompletions):
             raise IOError(f"MCPCore stream processing failed: {e}{error_details}") from e

     def _create_non_stream_from_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         """Handles the non-streaming response by making a single POST request (like deepinfra)."""
         try:
@@ -205,7 +208,8 @@ class Completions(BaseCompletions):
                 self._client.api_endpoint,
                 headers=self._client.headers,
                 json=payload,
-                timeout=self._client.timeout,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
                 impersonate="chrome110"
             )
             if not response.ok:
@@ -386,4 +390,3 @@ class MCPCore(OpenAICompatibleProvider):
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
         return _ModelList()
-
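MCPCore's `session.post` takes an `impersonate=` keyword, which matches the curl_cffi requests API rather than the standard requests library (an inference from the keyword; the diff itself does not name the transport). The same call shape as a standalone sketch:

from curl_cffi import requests  # assumed transport, given impersonate=

session = requests.Session()
resp = session.post(
    "https://example.invalid/api/chat",  # placeholder endpoint
    json={"messages": []},
    timeout=60,
    proxies={"https": "http://127.0.0.1:8080"},
    impersonate="chrome110",  # emulate Chrome 110's TLS fingerprint
)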
--- a/webscout/Provider/OPENAI/multichat.py
+++ b/webscout/Provider/OPENAI/multichat.py
@@ -94,6 +94,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -132,7 +134,7 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         # Make the API request
-        response_text = self._client._make_api_request(user_message)
+        response_text = self._client._make_api_request(user_message, timeout=timeout, proxies=proxies)

         # If streaming is requested, simulate streaming with the full response
         if stream:
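multichat fetches the complete reply in a single call and, when `stream=True`, simulates streaming from that full text. The idea reduced to a generator; the chunk size here is arbitrary:

def simulate_stream(text, chunk_size=16):
    # Yield the already-complete response in slices so callers can
    # consume it like a real token stream.
    for i in range(0, len(text), chunk_size):
        yield text[i:i + chunk_size]

for piece in simulate_stream("a full response produced by one request"):
    print(piece, end="")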
--- a/webscout/Provider/OPENAI/netwrck.py
+++ b/webscout/Provider/OPENAI/netwrck.py
@@ -37,6 +37,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -61,19 +63,20 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
                 "https://netwrck.com/api/chatpred_or",
                 json=payload,
                 headers=self._client.headers,
-                timeout=self._client.timeout,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
                 stream=True
             )
             response.raise_for_status()
@@ -127,14 +130,15 @@ class Completions(BaseCompletions):
             raise IOError(f"Netwrck request failed: {e}") from e

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
             response = self._client.session.post(
                 "https://netwrck.com/api/chatpred_or",
                 json=payload,
                 headers=self._client.headers,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()

--- a/webscout/Provider/OPENAI/oivscode.py
+++ b/webscout/Provider/OPENAI/oivscode.py
@@ -29,6 +29,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -52,11 +54,11 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

-    def _post_with_retry(self, payload, stream=False):
+    def _post_with_retry(self, payload, stream=False, timeout=None, proxies=None):
         """
         Try all endpoints until one succeeds or all fail.
         """
@@ -68,7 +70,8 @@ class Completions(BaseCompletions):
                     headers=self._client.headers,
                     json=payload,
                     stream=stream,
-                    timeout=self._client.timeout
+                    timeout=timeout or self._client.timeout,
+                    proxies=proxies or getattr(self._client, "proxies", None)
                 )
                 response.raise_for_status()
                 self._client.base_url = endpoint  # Update to working endpoint
@@ -79,10 +82,10 @@ class Completions(BaseCompletions):
         raise IOError(f"All oivscode endpoints failed: {last_exception}") from last_exception

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
-            response = self._post_with_retry(payload, stream=True)
+            response = self._post_with_retry(payload, stream=True, timeout=timeout, proxies=proxies)
             prompt_tokens = 0
             completion_tokens = 0
             total_tokens = 0
@@ -160,10 +163,10 @@ class Completions(BaseCompletions):
             raise

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
-            response = self._post_with_retry(payload, stream=False)
+            response = self._post_with_retry(payload, stream=False, timeout=timeout, proxies=proxies)
             data = response.json()

             choices_data = data.get('choices', [])
@@ -284,4 +287,4 @@ if __name__ == "__main__":
         max_tokens=50,
         stream=False
     )
-    print(response)
+    print(response)
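Finally, oivscode threads `timeout` and `proxies` through `_post_with_retry`, its endpoint-failover loop: each candidate endpoint is tried in order, the first success is returned (and remembered as `base_url`), and an IOError chains the last failure once all endpoints are exhausted. The shape of that loop as a standalone sketch with plain requests:

import requests

def post_with_retry(endpoints, payload, timeout=None, proxies=None):
    # Try each endpoint in order; first success wins, so the caller can
    # then prefer the working endpoint for subsequent requests.
    last_exception = None
    for endpoint in endpoints:
        try:
            response = requests.post(
                endpoint,
                json=payload,
                timeout=timeout or 30,
                proxies=proxies,
            )
            response.raise_for_status()
            return response
        except requests.RequestException as exc:
            last_exception = exc
    raise IOError(f"All endpoints failed: {last_exception}") from last_exception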