webscout 8.3.1__py3-none-any.whl → 8.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (114):
  1. webscout/AIutel.py +180 -78
  2. webscout/Bing_search.py +417 -0
  3. webscout/Extra/gguf.py +706 -177
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/ExaChat.py +84 -58
  8. webscout/Provider/GeminiProxy.py +140 -0
  9. webscout/Provider/HeckAI.py +85 -80
  10. webscout/Provider/Jadve.py +56 -50
  11. webscout/Provider/MCPCore.py +78 -75
  12. webscout/Provider/MiniMax.py +207 -0
  13. webscout/Provider/Nemotron.py +41 -13
  14. webscout/Provider/Netwrck.py +34 -51
  15. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -4
  16. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  17. webscout/Provider/OPENAI/MiniMax.py +298 -0
  18. webscout/Provider/OPENAI/README.md +32 -29
  19. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  20. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  21. webscout/Provider/OPENAI/__init__.py +17 -1
  22. webscout/Provider/OPENAI/autoproxy.py +1067 -39
  23. webscout/Provider/OPENAI/base.py +17 -76
  24. webscout/Provider/OPENAI/deepinfra.py +42 -108
  25. webscout/Provider/OPENAI/e2b.py +0 -1
  26. webscout/Provider/OPENAI/flowith.py +179 -166
  27. webscout/Provider/OPENAI/friendli.py +233 -0
  28. webscout/Provider/OPENAI/mcpcore.py +109 -70
  29. webscout/Provider/OPENAI/monochat.py +329 -0
  30. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  31. webscout/Provider/OPENAI/scirachat.py +59 -51
  32. webscout/Provider/OPENAI/toolbaz.py +3 -9
  33. webscout/Provider/OPENAI/typegpt.py +1 -1
  34. webscout/Provider/OPENAI/utils.py +19 -42
  35. webscout/Provider/OPENAI/x0gpt.py +14 -2
  36. webscout/Provider/OPENAI/xenai.py +514 -0
  37. webscout/Provider/OPENAI/yep.py +8 -2
  38. webscout/Provider/OpenGPT.py +54 -32
  39. webscout/Provider/PI.py +58 -84
  40. webscout/Provider/StandardInput.py +32 -13
  41. webscout/Provider/TTI/README.md +9 -9
  42. webscout/Provider/TTI/__init__.py +3 -1
  43. webscout/Provider/TTI/aiarta.py +92 -78
  44. webscout/Provider/TTI/bing.py +231 -0
  45. webscout/Provider/TTI/infip.py +212 -0
  46. webscout/Provider/TTI/monochat.py +220 -0
  47. webscout/Provider/TTS/speechma.py +45 -39
  48. webscout/Provider/TeachAnything.py +11 -3
  49. webscout/Provider/TextPollinationsAI.py +78 -70
  50. webscout/Provider/TogetherAI.py +350 -0
  51. webscout/Provider/Venice.py +37 -46
  52. webscout/Provider/VercelAI.py +27 -24
  53. webscout/Provider/WiseCat.py +35 -35
  54. webscout/Provider/WrDoChat.py +22 -26
  55. webscout/Provider/WritingMate.py +26 -22
  56. webscout/Provider/XenAI.py +324 -0
  57. webscout/Provider/__init__.py +10 -5
  58. webscout/Provider/deepseek_assistant.py +378 -0
  59. webscout/Provider/granite.py +48 -57
  60. webscout/Provider/koala.py +51 -39
  61. webscout/Provider/learnfastai.py +49 -64
  62. webscout/Provider/llmchat.py +79 -93
  63. webscout/Provider/llmchatco.py +63 -78
  64. webscout/Provider/multichat.py +51 -40
  65. webscout/Provider/oivscode.py +1 -1
  66. webscout/Provider/scira_chat.py +159 -96
  67. webscout/Provider/scnet.py +13 -13
  68. webscout/Provider/searchchat.py +13 -13
  69. webscout/Provider/sonus.py +12 -11
  70. webscout/Provider/toolbaz.py +25 -8
  71. webscout/Provider/turboseek.py +41 -42
  72. webscout/Provider/typefully.py +27 -12
  73. webscout/Provider/typegpt.py +41 -46
  74. webscout/Provider/uncovr.py +55 -90
  75. webscout/Provider/x0gpt.py +33 -17
  76. webscout/Provider/yep.py +79 -96
  77. webscout/auth/__init__.py +55 -0
  78. webscout/auth/api_key_manager.py +189 -0
  79. webscout/auth/auth_system.py +100 -0
  80. webscout/auth/config.py +76 -0
  81. webscout/auth/database.py +400 -0
  82. webscout/auth/exceptions.py +67 -0
  83. webscout/auth/middleware.py +248 -0
  84. webscout/auth/models.py +130 -0
  85. webscout/auth/providers.py +279 -0
  86. webscout/auth/rate_limiter.py +254 -0
  87. webscout/auth/request_models.py +127 -0
  88. webscout/auth/request_processing.py +226 -0
  89. webscout/auth/routes.py +550 -0
  90. webscout/auth/schemas.py +103 -0
  91. webscout/auth/server.py +367 -0
  92. webscout/client.py +121 -70
  93. webscout/litagent/Readme.md +68 -55
  94. webscout/litagent/agent.py +99 -9
  95. webscout/scout/core/scout.py +104 -26
  96. webscout/scout/element.py +139 -18
  97. webscout/swiftcli/core/cli.py +14 -3
  98. webscout/swiftcli/decorators/output.py +59 -9
  99. webscout/update_checker.py +31 -49
  100. webscout/version.py +1 -1
  101. webscout/webscout_search.py +4 -12
  102. webscout/webscout_search_async.py +3 -10
  103. webscout/yep_search.py +2 -11
  104. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/METADATA +141 -99
  105. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/RECORD +109 -83
  106. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +1 -1
  107. webscout/Provider/HF_space/__init__.py +0 -0
  108. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  109. webscout/Provider/OPENAI/api.py +0 -1320
  110. webscout/Provider/TTI/fastflux.py +0 -233
  111. webscout/Provider/Writecream.py +0 -246
  112. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  113. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  114. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/MiniMax.py
@@ -0,0 +1,207 @@
+ import os
+ import json
+ import requests
+ from typing import Any, Dict, Optional, Union, Generator
+ from webscout.AIutel import sanitize_stream, Optimizers, Conversation, AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+
+ class MiniMax(Provider):
+     """
+     Provider for MiniMax-Reasoning-01 API, following the standard provider interface.
+     """
+     AVAILABLE_MODELS = ["MiniMax-Reasoning-01"]
+     API_URL = "https://api.minimaxi.chat/v1/text/chatcompletion_v2"
+     # TODO: Move API_KEY to env/config for security
+     API_KEY = os.environ.get("MINIMAX_API_KEY") or """eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJHcm91cE5hbWUiOiJtbyBuaSIsIlVzZXJOYW1lIjoibW8gbmkiLCJBY2NvdW50IjoiIiwiU3ViamVjdElEIjoiMTg3NjIwMDY0ODA2NDYzNTI0MiIsIlBob25lIjoiIiwiR3JvdXBJRCI6IjE4NzYyMDA2NDgwNjA0NDA5MzgiLCJQYWdlTmFtZSI6IiIsIk1haWwiOiJuaW1vQHN1YnN1cC52aXAiLCJDcmVhdGVUaW1lIjoiMjAyNS0wMS0wNyAxMToyNzowNyIsIlRva2VuVHlwZSI6MSwiaXNzIjoibWluaW1heCJ9.Ge1ZnpFPUfXVdMini0P_qXbP_9VYwzXiffG9DsNQck4GtYEOs33LDeAiwrVsrrLZfvJ2icQZ4sRZS54wmPuWua_Dav6pYJty8ZtahmUX1IuhlUX5YErhhCRAIy3J1xB8FkLHLyylChuBHpkNz6O6BQLmPqmoa-cOYK9Qrc6IDeu8SX1iMzO9-MSkcWNvkvpCF2Pf9tekBVWNKMDK6IZoMEPbtkaPXdDyP6l0M0e2AlL_E0oM9exg3V-ohAi8OTPFyqM6dcd4TwF-b9DULxfIsRFw401mvIxcTDWa42u2LULewdATVRD2BthU65tuRqEiWeFWMvFlPj2soMze_QIiUA"""
+     MODEL_CONTROL_DEFAULTS = {"tokens_to_generate": 40000, "temperature": 1, "top_p": 0.95}
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2049,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "MiniMax-Reasoning-01",
+         system_prompt: str = "You are a helpful assistant, always respond in english",
+     ):
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+         self.model = model
+         self.api_url = self.API_URL
+         self.api_key = self.API_KEY
+         self.timeout = timeout
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.last_response = {}
+         self.system_prompt = system_prompt
+         self.proxies = proxies
+         self.__available_optimizers = tuple(
+             method for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     @staticmethod
+     def _extract_content(chunk: Any) -> Optional[dict]:
+         if not isinstance(chunk, dict):
+             return None
+         choice = chunk.get('choices', [{}])[0]
+         delta = choice.get('delta', {})
+         content = delta.get('content')
+         reasoning = delta.get('reasoning_content')
+         result = {}
+         if content:
+             result['content'] = content
+         if reasoning:
+             result['reasoning_content'] = reasoning
+         return result if result else None
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = True,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+         messages = [
+             {'role': 'system', 'content': self.system_prompt},
+             {'role': 'user', 'content': conversation_prompt}
+         ]
+         data = {
+             'model': self.model,
+             'messages': messages,
+             'stream': True,
+             'max_tokens': self.MODEL_CONTROL_DEFAULTS.get('tokens_to_generate', 512),
+             'temperature': self.MODEL_CONTROL_DEFAULTS.get('temperature', 1.0),
+             'top_p': self.MODEL_CONTROL_DEFAULTS.get('top_p', 1.0),
+         }
+         headers = {
+             'Content-Type': 'application/json',
+             'Authorization': f'Bearer {self.api_key}',
+         }
+         def for_stream():
+             try:
+                 response = requests.post(
+                     self.api_url,
+                     headers=headers,
+                     data=json.dumps(data),
+                     stream=True,
+                     timeout=self.timeout,
+                     proxies=self.proxies if self.proxies else None
+                 )
+                 if not response.ok:
+                     raise exceptions.FailedToGenerateResponseError(
+                         f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                     )
+                 streaming_response = ""
+                 last_content = ""
+                 last_reasoning = ""
+                 in_think = False
+                 processed_stream = sanitize_stream(
+                     response.iter_lines(),
+                     intro_value="data:",
+                     to_json=True,
+                     content_extractor=self._extract_content,
+                     raw=False  # always process as dict for logic
+                 )
+                 for chunk in processed_stream:
+                     if not chunk:
+                         continue
+                     content = chunk.get('content') if isinstance(chunk, dict) else None
+                     reasoning = chunk.get('reasoning_content') if isinstance(chunk, dict) else None
+                     # Handle reasoning_content with <think> tags
+                     if reasoning and reasoning != last_reasoning:
+                         if not in_think:
+                             yield "<think>\n\n"
+                             in_think = True
+                         yield reasoning
+                         last_reasoning = reasoning
+                     # If we were in <think> and now have new content, close <think>
+                     if in_think and content and content != last_content:
+                         yield "</think>\n\n"
+                         in_think = False
+                     # Handle normal content
+                     if content and content != last_content:
+                         yield content
+                         streaming_response += content
+                         last_content = content
+                 if not raw:
+                     self.last_response = {"text": streaming_response}
+                     self.conversation.update_chat_history(prompt, streaming_response)
+             except Exception as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+         def for_non_stream():
+             full_response = ""
+             for chunk in for_stream():
+                 if isinstance(chunk, dict) and "text" in chunk:
+                     full_response += chunk["text"]
+                 elif isinstance(chunk, str):
+                     full_response += chunk
+             if not raw:
+                 self.last_response = {"text": full_response}
+                 self.conversation.update_chat_history(prompt, full_response)
+                 return {"text": full_response}
+             else:
+                 return full_response
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = True,
+         optimizer: str = None,
+         conversationally: bool = False,
+         raw: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         def for_stream():
+             for response in self.ask(
+                 prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
+             ):
+                 if raw:
+                     yield response
+                 else:
+                     yield response
+         def for_non_stream():
+             result = self.ask(
+                 prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally
+             )
+             if raw:
+                 return result
+             else:
+                 return self.get_message(result)
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response.get("text", "")
+
+ if __name__ == "__main__":
+     ai = MiniMax()
+     resp = ai.chat("What is the capital of France?", stream=True, raw=False)
+     for chunk in resp:
+         print(chunk, end="", flush=True)
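For context, a minimal usage sketch of the new MiniMax provider added above. This example is not part of the diff: it assumes webscout 8.3.3 is installed and that MINIMAX_API_KEY is set so the bundled fallback token is never relied on; the timeout value and prompt are illustrative.

    # Hypothetical usage of the MiniMax provider shown in the hunk above.
    # Assumes webscout 8.3.3 and MINIMAX_API_KEY in the environment.
    from webscout.Provider.MiniMax import MiniMax

    ai = MiniMax(timeout=60)
    # chat(stream=True) yields text chunks, including <think>...</think>
    # reasoning segments emitted by the streaming logic above.
    for chunk in ai.chat("What is the capital of France?", stream=True):
        print(chunk, end="", flush=True)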
webscout/Provider/Nemotron.py
@@ -110,7 +110,8 @@ class NEMOTRON(Provider):
      def _make_request(
          self,
          message: str,
-         stream: bool = False
+         stream: bool = False,
+         raw: bool = False
      ) -> Generator[str, None, None]:
          """Make request to NEMOTRON API."""
          payload = {
@@ -131,10 +132,26 @@ class NEMOTRON(Provider):
                      timeout=self.timeout
                  ) as response:
                      response.raise_for_status()
-                     yield from sanitize_stream(
-                         response.iter_content(chunk_size=1024),
-                         to_json=False,
-                     )
+                     buffer = ""
+                     chunk_size = 32
+                     for chunk in response.iter_content(chunk_size=chunk_size):
+                         if not chunk:
+                             continue
+                         text = chunk.decode(errors="ignore")
+                         buffer += text
+                         while len(buffer) >= chunk_size:
+                             out = buffer[:chunk_size]
+                             buffer = buffer[chunk_size:]
+                             if out.strip():
+                                 if raw:
+                                     yield out
+                                 else:
+                                     yield out
+                     if buffer.strip():
+                         if raw:
+                             yield buffer
+                         else:
+                             yield buffer
          else:
              response = self.session.post(
                  self.url,
@@ -143,7 +160,10 @@ class NEMOTRON(Provider):
                  timeout=self.timeout
              )
              response.raise_for_status()
-             yield response.text
+             if raw:
+                 yield response.text
+             else:
+                 yield response.text

      except requests.exceptions.RequestException as e:
          raise exceptions.ProviderConnectionError(f"Connection error: {str(e)}")
@@ -167,13 +187,20 @@ class NEMOTRON(Provider):
              raise ValueError(f"Optimizer is not one of {self.__available_optimizers}")

          def for_stream():
-             for text in self._make_request(conversation_prompt, stream=True):
-                 yield {"text": text}
+             for text in self._make_request(conversation_prompt, stream=True, raw=raw):
+                 if raw:
+                     yield text
+                 else:
+                     yield {"text": text}

          def for_non_stream():
-             response_text = next(self._make_request(conversation_prompt, stream=False))
-             self.last_response = {"text": response_text}
-             return self.last_response
+             response_text = next(self._make_request(conversation_prompt, stream=False, raw=raw))
+             if raw:
+                 self.last_response = response_text
+                 return response_text
+             else:
+                 self.last_response = {"text": response_text}
+                 return self.last_response

          return for_stream() if stream else for_non_stream()

@@ -214,5 +241,6 @@ class NEMOTRON(Provider):
  if __name__ == "__main__":
      # Example usage
      nemotron = NEMOTRON()
-     response = nemotron.chat("Hello, how are you?", stream=False)
-     print(response)
+     response = nemotron.chat("write me about humans in points", stream=True)
+     for part in response:
+         print(part, end="", flush=True)
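A minimal sketch of what the new raw flag implies for callers, inferred from the for_stream hunk above. Illustrative only: the prompts are made up, and the assumption that ask() exposes raw in its public signature (with a default of False) follows from the raw=raw pass-through shown in the diff rather than from a hunk that prints the signature itself.

    # Hypothetical: per the hunk above, ask() now threads `raw` through to
    # _make_request(). With raw=False chunks arrive as {"text": ...} dicts;
    # with raw=True they arrive as plain strings.
    from webscout.Provider.Nemotron import NEMOTRON

    bot = NEMOTRON()
    for chunk in bot.ask("Hello!", stream=True):          # dict-shaped chunks
        print(chunk["text"], end="", flush=True)
    for piece in bot.ask("Hello again!", stream=True, raw=True):
        print(piece, end="", flush=True)                  # plain strings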
webscout/Provider/Netwrck.py
@@ -127,75 +127,58 @@ class Netwrck(Provider):

          def for_stream():
              try:
-                 # Use curl_cffi session post with impersonate
                  response = self.session.post(
                      "https://netwrck.com/api/chatpred_or",
                      json=payload,
-                     # headers are set on the session
-                     # proxies are set on the session
                      timeout=self.timeout,
                      stream=True,
-                     impersonate="chrome110" # Use a common impersonation profile
+                     impersonate="chrome110"
                  )
-                 response.raise_for_status() # Check for HTTP errors
-
-                 streaming_text = ""
-                 # Use sanitize_stream
-                 processed_stream = sanitize_stream(
-                     data=response.iter_content(chunk_size=None), # Pass byte iterator
-                     intro_value=None, # No prefix
-                     to_json=False, # It's text
-                     content_extractor=self._netwrck_extractor, # Use the quote stripper
-                     yield_raw_on_error=True
-                 )
-                 for content_chunk in processed_stream:
-                     if content_chunk and isinstance(content_chunk, str):
-                         streaming_text += content_chunk
-                         yield {"text": content_chunk} if not raw else content_chunk
-                 # Update history after stream finishes
-                 self.last_response = {"text": streaming_text} # Store aggregated text
-                 self.conversation.update_chat_history(payload["query"], streaming_text)
-
-             except CurlError as e: # Catch CurlError
+                 response.raise_for_status()
+                 buffer = ""
+                 chunk_size = 32
+                 for chunk in response.iter_content(chunk_size=chunk_size):
+                     if not chunk:
+                         continue
+                     text = chunk.decode(errors="ignore")
+                     buffer += text
+                     while len(buffer) >= chunk_size:
+                         out = buffer[:chunk_size]
+                         buffer = buffer[chunk_size:]
+                         if out.strip():
+                             if raw:
+                                 yield out
+                             else:
+                                 yield {"text": out}
+                 if buffer.strip():
+                     if raw:
+                         yield buffer
+                     else:
+                         yield {"text": buffer}
+                 self.last_response = {"text": buffer}
+                 self.conversation.update_chat_history(payload["query"], buffer)
+             except CurlError as e:
                  raise exceptions.ProviderConnectionError(f"Network error (CurlError): {str(e)}") from e
-             except Exception as e: # Catch other potential exceptions (like HTTPError)
+             except Exception as e:
                  err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                  raise exceptions.ProviderConnectionError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e

          def for_non_stream():
              try:
-                 # Use curl_cffi session post with impersonate
                  response = self.session.post(
                      "https://netwrck.com/api/chatpred_or",
                      json=payload,
-                     # headers are set on the session
-                     # proxies are set on the session
                      timeout=self.timeout,
-                     impersonate="chrome110" # Use a common impersonation profile
-                 )
-                 response.raise_for_status() # Check for HTTP errors
-
-                 response_text_raw = response.text # Get raw text
-
-                 # Process the text using sanitize_stream
-                 processed_stream = sanitize_stream(
-                     data=response_text_raw,
-                     intro_value=None,
-                     to_json=False,
-                     content_extractor=self._netwrck_extractor
+                     impersonate="chrome110"
                  )
-                 # Aggregate the single result
-                 text = "".join(list(processed_stream))
-
-                 self.last_response = {"text": text} # Store processed text
-                 self.conversation.update_chat_history(prompt, text)
-
-                 # Return dict or raw string
-                 return text if raw else self.last_response
-
-             except CurlError as e: # Catch CurlError
+                 response.raise_for_status()
+                 response_text_raw = response.text
+                 self.last_response = {"text": response_text_raw}
+                 self.conversation.update_chat_history(prompt, response_text_raw)
+                 return response_text_raw if raw else self.last_response
+             except CurlError as e:
                  raise exceptions.FailedToGenerateResponseError(f"Network error (CurlError): {str(e)}") from e
-             except Exception as e: # Catch other potential exceptions (like HTTPError)
+             except Exception as e:
                  err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                  raise exceptions.FailedToGenerateResponseError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e

webscout/Provider/OPENAI/BLACKBOXAI.py
@@ -1,6 +1,5 @@
  # from pickle import NONE
  import requests
- import requests
  import random
  import string
  import base64
@@ -1044,6 +1043,3 @@ if __name__ == "__main__":
      )
      for chunk in response:
          print(chunk.choices[0].delta.content, end='', flush=True)
-     print()
-     print("Proxies on instance:", client.proxies)
-     print("Proxies on session:", client.session.proxies)