webscout-8.3.2-py3-none-any.whl → webscout-8.3.3-py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release: this version of webscout has been flagged as possibly problematic.

Files changed (94)
  1. webscout/AIutel.py +146 -37
  2. webscout/Bing_search.py +1 -2
  3. webscout/Provider/AISEARCH/__init__.py +1 -0
  4. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  5. webscout/Provider/ExaChat.py +84 -58
  6. webscout/Provider/HeckAI.py +85 -80
  7. webscout/Provider/Jadve.py +56 -50
  8. webscout/Provider/MiniMax.py +207 -0
  9. webscout/Provider/Nemotron.py +41 -13
  10. webscout/Provider/Netwrck.py +34 -51
  11. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  12. webscout/Provider/OPENAI/MiniMax.py +298 -0
  13. webscout/Provider/OPENAI/README.md +30 -29
  14. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  15. webscout/Provider/OPENAI/__init__.py +3 -1
  16. webscout/Provider/OPENAI/autoproxy.py +752 -17
  17. webscout/Provider/OPENAI/base.py +7 -76
  18. webscout/Provider/OPENAI/deepinfra.py +42 -108
  19. webscout/Provider/OPENAI/flowith.py +179 -166
  20. webscout/Provider/OPENAI/friendli.py +233 -0
  21. webscout/Provider/OPENAI/monochat.py +329 -0
  22. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  23. webscout/Provider/OPENAI/toolbaz.py +1 -0
  24. webscout/Provider/OPENAI/typegpt.py +1 -1
  25. webscout/Provider/OPENAI/utils.py +19 -42
  26. webscout/Provider/OPENAI/x0gpt.py +14 -2
  27. webscout/Provider/OpenGPT.py +54 -32
  28. webscout/Provider/PI.py +58 -84
  29. webscout/Provider/StandardInput.py +32 -13
  30. webscout/Provider/TTI/README.md +9 -9
  31. webscout/Provider/TTI/__init__.py +2 -1
  32. webscout/Provider/TTI/aiarta.py +92 -78
  33. webscout/Provider/TTI/infip.py +212 -0
  34. webscout/Provider/TTI/monochat.py +220 -0
  35. webscout/Provider/TeachAnything.py +11 -3
  36. webscout/Provider/TextPollinationsAI.py +78 -70
  37. webscout/Provider/TogetherAI.py +32 -48
  38. webscout/Provider/Venice.py +37 -46
  39. webscout/Provider/VercelAI.py +27 -24
  40. webscout/Provider/WiseCat.py +35 -35
  41. webscout/Provider/WrDoChat.py +22 -26
  42. webscout/Provider/WritingMate.py +26 -22
  43. webscout/Provider/__init__.py +2 -2
  44. webscout/Provider/granite.py +48 -57
  45. webscout/Provider/koala.py +51 -39
  46. webscout/Provider/learnfastai.py +49 -64
  47. webscout/Provider/llmchat.py +79 -93
  48. webscout/Provider/llmchatco.py +63 -78
  49. webscout/Provider/multichat.py +51 -40
  50. webscout/Provider/oivscode.py +1 -1
  51. webscout/Provider/scira_chat.py +159 -96
  52. webscout/Provider/scnet.py +13 -13
  53. webscout/Provider/searchchat.py +13 -13
  54. webscout/Provider/sonus.py +12 -11
  55. webscout/Provider/toolbaz.py +25 -8
  56. webscout/Provider/turboseek.py +41 -42
  57. webscout/Provider/typefully.py +27 -12
  58. webscout/Provider/typegpt.py +41 -46
  59. webscout/Provider/uncovr.py +55 -90
  60. webscout/Provider/x0gpt.py +33 -17
  61. webscout/Provider/yep.py +79 -96
  62. webscout/auth/__init__.py +12 -1
  63. webscout/auth/providers.py +27 -5
  64. webscout/auth/routes.py +128 -104
  65. webscout/auth/server.py +367 -312
  66. webscout/client.py +121 -116
  67. webscout/litagent/Readme.md +68 -55
  68. webscout/litagent/agent.py +99 -9
  69. webscout/version.py +1 -1
  70. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/METADATA +102 -90
  71. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/RECORD +75 -87
  72. webscout/Provider/TTI/fastflux.py +0 -233
  73. webscout/Provider/Writecream.py +0 -246
  74. webscout/auth/static/favicon.svg +0 -11
  75. webscout/auth/swagger_ui.py +0 -203
  76. webscout/auth/templates/components/authentication.html +0 -237
  77. webscout/auth/templates/components/base.html +0 -103
  78. webscout/auth/templates/components/endpoints.html +0 -750
  79. webscout/auth/templates/components/examples.html +0 -491
  80. webscout/auth/templates/components/footer.html +0 -75
  81. webscout/auth/templates/components/header.html +0 -27
  82. webscout/auth/templates/components/models.html +0 -286
  83. webscout/auth/templates/components/navigation.html +0 -70
  84. webscout/auth/templates/static/api.js +0 -455
  85. webscout/auth/templates/static/icons.js +0 -168
  86. webscout/auth/templates/static/main.js +0 -784
  87. webscout/auth/templates/static/particles.js +0 -201
  88. webscout/auth/templates/static/styles.css +0 -3353
  89. webscout/auth/templates/static/ui.js +0 -374
  90. webscout/auth/templates/swagger_ui.html +0 -170
  91. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  92. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +0 -0
  93. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  94. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/uncovr.py CHANGED
@@ -165,8 +165,6 @@ class UncovrAI(Provider):
                 )
             else:
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        # Prepare the request payload
         payload = {
             "content": conversation_prompt,
             "chatId": self.chat_id,
@@ -180,78 +178,72 @@ class UncovrAI(Provider):
                 "creativity": creativity
             }
         }
-
         def for_stream():
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     self.url,
                     json=payload,
                     stream=True,
                     timeout=self.timeout,
-                    impersonate=self.fingerprint.get("browser_type", "chrome110") # Use fingerprint browser type
+                    impersonate=self.fingerprint.get("browser_type", "chrome110")
                 )
-
                 if response.status_code != 200:
-                    # If we get a non-200 response, try refreshing our identity once
                     if response.status_code in [403, 429]:
                         self.refresh_identity()
-                        # Retry with new identity using curl_cffi session
                         retry_response = self.session.post(
                             self.url,
                             json=payload,
                             stream=True,
                             timeout=self.timeout,
-                            impersonate=self.fingerprint.get("browser_type", "chrome110") # Use updated fingerprint
+                            impersonate=self.fingerprint.get("browser_type", "chrome110")
                         )
                         if not retry_response.ok:
                             raise exceptions.FailedToGenerateResponseError(
                                 f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
                             )
-                        response = retry_response # Use the successful retry response
+                        response = retry_response
                     else:
                         raise exceptions.FailedToGenerateResponseError(
                             f"Request failed with status code {response.status_code} - {response.text}"
                         )
-
                 streaming_text = ""
-                # Use sanitize_stream with the custom extractor
                 processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None), # Pass byte iterator
-                    intro_value=None, # No simple prefix
-                    to_json=False, # Content is not JSON
-                    content_extractor=self._uncovr_extractor, # Use the specific extractor
-                    yield_raw_on_error=True # Keep yielding even if extractor fails, for potential error messages? (Adjust if needed)
+                    data=response.iter_content(chunk_size=None),
+                    intro_value=None,
+                    to_json=False,
+                    content_extractor=self._uncovr_extractor,
+                    yield_raw_on_error=True,
+                    raw=raw
                 )
-
                 for content_chunk in processed_stream:
-                    if content_chunk and isinstance(content_chunk, str):
-                        streaming_text += content_chunk
-                        yield dict(text=content_chunk) if not raw else content_chunk
-
+                    # Always yield as string, even in raw mode
+                    if isinstance(content_chunk, bytes):
+                        content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                    if content_chunk is None:
+                        continue # Ignore non-content lines
+                    if raw:
+                        yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_text += content_chunk
+                            yield dict(text=content_chunk)
                 self.last_response = {"text": streaming_text}
                 self.conversation.update_chat_history(prompt, streaming_text)
-
-            except CurlError as e: # Catch CurlError
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-            except Exception as e: # Catch other potential exceptions
+            except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
-
-
         def for_non_stream():
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     self.url,
                     json=payload,
                     timeout=self.timeout,
                     impersonate=self.fingerprint.get("browser_type", "chrome110")
                 )
-
                 if response.status_code != 200:
                     if response.status_code in [403, 429]:
                         self.refresh_identity()
-                        # Retry with new identity using curl_cffi session
                         response = self.session.post(
                             self.url,
                             json=payload,
@@ -266,36 +258,32 @@ class UncovrAI(Provider):
                         raise exceptions.FailedToGenerateResponseError(
                             f"Request failed with status code {response.status_code} - {response.text}"
                         )
-
-                response_text = response.text # Get the full response text
-
-                # Use sanitize_stream to process the non-streaming text
-                # It won't parse as JSON, but will apply the extractor line by line
+                response_text = response.text
                 processed_stream = sanitize_stream(
-                    data=response_text.splitlines(), # Split into lines first
+                    data=response_text.splitlines(),
                     intro_value=None,
                     to_json=False,
                     content_extractor=self._uncovr_extractor,
-                    yield_raw_on_error=True
+                    yield_raw_on_error=True,
+                    raw=raw
                 )
-
-                # Aggregate the results from the generator
                 full_response = ""
                 for content in processed_stream:
-                    if content and isinstance(content, str):
+                    if isinstance(content, bytes):
+                        content = content.decode('utf-8', errors='ignore')
+                    if content is None:
+                        continue # Ignore non-content lines
+                    if raw:
+                        full_response += content
+                    elif content and isinstance(content, str):
                         full_response += content
-
-                # Check if aggregation resulted in empty response (might indicate error not caught by extractor)
                 self.last_response = {"text": full_response}
                 self.conversation.update_chat_history(prompt, full_response)
-                return {"text": full_response}
-
-            except CurlError as e: # Catch CurlError
+                return {"text": full_response} if not raw else full_response
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
-            except Exception as e: # Catch other potential exceptions
+            except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e}")
-
-
         return for_stream() if stream else for_non_stream()
 
     def chat(
@@ -307,23 +295,29 @@ class UncovrAI(Provider):
         temperature: int = 32,
         creativity: str = "medium",
         selected_focus: list = ["web"],
-        selected_tools: list = []
+        selected_tools: list = [],
+        raw: bool = False, # Added raw parameter
     ) -> Union[str, Generator[str, None, None]]:
         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally,
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally,
                 temperature=temperature, creativity=creativity,
                 selected_focus=selected_focus, selected_tools=selected_tools
             ):
-                yield self.get_message(response)
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt, False, optimizer=optimizer, conversationally=conversationally,
-                    temperature=temperature, creativity=creativity,
-                    selected_focus=selected_focus, selected_tools=selected_tools
-                )
+            result = self.ask(
+                prompt, False, raw=raw, optimizer=optimizer, conversationally=conversationally,
+                temperature=temperature, creativity=creativity,
+                selected_focus=selected_focus, selected_tools=selected_tools
             )
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
@@ -333,36 +327,7 @@ class UncovrAI(Provider):
         return text.replace('\\n', '\n').replace('\\n\\n', '\n\n') # Keep newline replacement
 
 if __name__ == "__main__":
-    # Ensure curl_cffi is installed
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in UncovrAI.AVAILABLE_MODELS:
-        try:
-            test_ai = UncovrAI(model=model, timeout=60)
-            # Test non-stream first as stream logic depends on it
-            response_non_stream = test_ai.chat("Say 'Hello' in one word", stream=False)
-
-            if response_non_stream and len(response_non_stream.strip()) > 0:
-                # Now test stream
-                response_stream = test_ai.chat("Say 'Hi' in one word", stream=True)
-                response_text = ""
-                for chunk in response_stream:
-                    response_text += chunk
-
-                if response_text and len(response_text.strip()) > 0:
-                    status = "✓"
-                    # Clean and truncate response
-                    clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
-                    display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-                else:
-                    status = "✗ (Stream)"
-                    display_text = "Empty or invalid stream response"
-            else:
-                status = "✗ (Non-Stream)"
-                display_text = "Empty or invalid non-stream response"
-
-            print(f"\r{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+    ai = UncovrAI()
+    response = ai.chat("who is pm of india?", raw=False, stream=True)
+    for chunk in response:
+        print(chunk, end='', flush=True)
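
Note on the uncovr.py change: ask() and chat() gain a raw keyword. With raw=False (the default) the stream yields {"text": ...} dicts that chat() unwraps via get_message(); with raw=True the string chunks from sanitize_stream pass through untouched. A minimal usage sketch under that reading of the diff (the prompt text and import path are illustrative, not taken from the package):

from webscout.Provider.uncovr import UncovrAI  # module path assumed from the files-changed list

ai = UncovrAI(timeout=60)

# Default mode: each chunk arrives as a plain string unwrapped from {"text": ...}.
for chunk in ai.chat("Name one HTTP method", stream=True):
    print(chunk, end="", flush=True)

# Raw mode: chunks bypass get_message() and arrive exactly as sanitize_stream yields them.
for chunk in ai.chat("Name one HTTP method", stream=True, raw=True):
    print(chunk, end="", flush=True)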
webscout/Provider/x0gpt.py CHANGED
@@ -1,4 +1,4 @@
-from typing import Optional, Union, Any, Dict
+from typing import Generator, Optional, Union, Any, Dict
 from uuid import uuid4
 from curl_cffi import CurlError
 from curl_cffi.requests import Session
@@ -134,7 +134,7 @@ class X0GPT(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]:
+    ) -> Union[Dict[str, Any], Generator]:
         """
         Sends a prompt to the x0-gpt.devwtf.in API and returns the response.
 
@@ -197,13 +197,20 @@ class X0GPT(Provider):
                     data=response.iter_content(chunk_size=None), # Pass byte iterator
                     intro_value=None, # No simple prefix to remove here
                     to_json=False, # Content is not JSON
-                    content_extractor=self._x0gpt_extractor # Use the specific extractor
+                    content_extractor=self._x0gpt_extractor, # Use the specific extractor
+                    raw=raw
                 )
 
                 for content_chunk in processed_stream:
-                    if content_chunk and isinstance(content_chunk, str):
-                        streaming_response += content_chunk
-                        yield content_chunk if raw else dict(text=content_chunk)
+                    # Always yield as string, even in raw mode
+                    if isinstance(content_chunk, bytes):
+                        content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                    if raw:
+                        yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_response += content_chunk
+                            yield dict(text=content_chunk)
 
                 self.last_response.update(dict(text=streaming_response))
                 self.conversation.update_chat_history(
@@ -217,6 +224,8 @@ class X0GPT(Provider):
 
         def for_non_stream():
             # This function implicitly uses the updated for_stream
+            if stream:
+                return for_stream()
             for _ in for_stream():
                 pass
             return self.last_response
@@ -229,7 +238,8 @@ class X0GPT(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
+        raw: bool = False, # Added raw parameter
+    ) -> Union[str, Generator[str, None, None]]:
         """
         Generates a response from the X0GPT API.
 
@@ -251,19 +261,25 @@ class X0GPT(Provider):
 
         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
             ):
-                yield self.get_message(response)
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
 
         def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
 
         return for_stream() if stream else for_non_stream()
 
@@ -294,6 +310,6 @@ class X0GPT(Provider):
 if __name__ == "__main__":
     from rich import print
     ai = X0GPT(timeout=5000)
-    response = ai.chat("write a poem about AI", stream=True)
+    response = ai.chat("write a poem about AI", stream=True, raw=True)
     for chunk in response:
         print(chunk, end="", flush=True)
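
The chunk loop added to x0gpt.py (and, in near-identical form, to uncovr.py above) normalizes whatever sanitize_stream yields before handing it to the caller. A standalone sketch of that pattern, with hypothetical names, for reference:

from typing import Iterator, Optional, Union

def normalize_chunks(chunks: Iterator[Optional[Union[bytes, str]]], raw: bool):
    """Mirror of the loop in the diff: decode bytes, drop None placeholders,
    wrap in {"text": ...} unless raw mode is requested."""
    for chunk in chunks:
        if isinstance(chunk, bytes):
            chunk = chunk.decode("utf-8", errors="ignore")  # same decode call as the diff
        if chunk is None:
            continue  # non-content lines are skipped
        yield chunk if raw else {"text": chunk}

# e.g. list(normalize_chunks(iter([b"Hel", "lo", None]), raw=True)) == ["Hel", "lo"]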
webscout/Provider/yep.py CHANGED
@@ -1,10 +1,8 @@
 import uuid
-import json
 from curl_cffi import CurlError
 from curl_cffi.requests import Session
 
 from typing import Any, Dict, Optional, Generator, Union, List, TypeVar
-
 from webscout.AIutel import Optimizers
 from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
@@ -192,81 +190,63 @@ class YEPCHAT(Provider):
 
         def for_stream():
             try:
-                # buffer = b"" # No longer needed here
-                # Use curl_cffi session post, pass cookies explicitly
                 response = self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout, impersonate=self.fingerprint.get("browser_type", "chrome110"))
-
                 if not response.ok:
-                    # If we get a non-200 response, try refreshing our identity once
                     if response.status_code in [403, 429]:
                         self.refresh_identity()
-                        # Retry with new identity
-                        # Use curl_cffi session post, pass cookies explicitly
                         retry_response = self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, stream=True, timeout=self.timeout, impersonate=self.fingerprint.get("browser_type", "chrome110"))
                         if not retry_response.ok:
                             raise exceptions.FailedToGenerateResponseError(
                                 f"Failed to generate response after identity refresh - ({retry_response.status_code}, {retry_response.reason}) - {retry_response.text}"
                             )
-                        response = retry_response # Use the successful retry response
+                        response = retry_response
                     else:
                         raise exceptions.FailedToGenerateResponseError(
                             f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                         )
-
-                # --- Start of stream processing block (should be outside the 'if not response.ok' block) ---
                 streaming_text = ""
-
-                # Use sanitize_stream to process the lines
                 processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None), # Pass the byte iterator directly
+                    data=response.iter_content(chunk_size=None),
                     intro_value="data:",
-                    to_json=True, # Yep sends JSON after 'data:'
-                    skip_markers=["[DONE]"], # Skip the final marker
-                    yield_raw_on_error=False, # Only process valid JSON data
-                    # --- Add the content extractor ---
-                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None
+                    to_json=True,
+                    skip_markers=["[DONE]"],
+                    yield_raw_on_error=False,
+                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
+                    raw=raw
                 )
-                # The loop now yields the final extracted string content directly
                 for content_chunk in processed_stream:
-                    # --- TEMPORARY DEBUG PRINT ---
-                    # print(f"\nDEBUG: Received extracted content: {content_chunk!r}\n", flush=True) # Keep or remove debug print as needed
-                    if content_chunk and isinstance(content_chunk, str): # Ensure it's a non-empty string
-                        streaming_text += content_chunk
-                        # Yield dict or raw string chunk based on 'raw' flag
-                        yield dict(text=content_chunk) if not raw else content_chunk
-                # --- End of stream processing block ---
-
-                # Check if the response contains a tool call (This should happen *after* processing the stream)
-                response_data = self.conversation.handle_tool_response(streaming_text)
-
-                if response_data["is_tool_call"]:
-                    # Handle tool call results
-                    if response_data["success"]:
-                        for tool_call in response_data.get("tool_calls", []):
-                            tool_name = tool_call.get("name", "unknown_tool")
-                            result = response_data["result"]
-                            self.conversation.update_chat_history_with_tool(prompt, tool_name, result)
+                    # Always yield as string, even in raw mode
+                    if isinstance(content_chunk, bytes):
+                        content_chunk = content_chunk.decode('utf-8', errors='ignore')
+                    if raw:
+                        yield content_chunk
                     else:
-                        # If tool call failed, update history with error
-                        self.conversation.update_chat_history(prompt,
-                            f"Error executing tool call: {response_data['result']}")
-                else:
-                    # Normal response handling
-                    self.conversation.update_chat_history(prompt, streaming_text)
-
-            except CurlError as e: # Catch CurlError
-                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_text += content_chunk
+                            yield dict(text=content_chunk)
+                if not raw:
+                    response_data = self.conversation.handle_tool_response(streaming_text)
+                    if response_data["is_tool_call"]:
+                        if response_data["success"]:
+                            for tool_call in response_data.get("tool_calls", []):
+                                tool_name = tool_call.get("name", "unknown_tool")
+                                result = response_data["result"]
+                                self.conversation.update_chat_history_with_tool(prompt, tool_name, result)
+                        else:
+                            self.conversation.update_chat_history(prompt, f"Error executing tool call: {response_data['result']}")
+                    else:
+                        self.conversation.update_chat_history(prompt, streaming_text)
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
             except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
         def for_non_stream():
             try:
-                # Use curl_cffi session post, pass cookies explicitly
                 response = self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, timeout=self.timeout, impersonate=self.fingerprint.get("browser_type", "chrome110"))
                 if not response.ok:
                     if response.status_code in [403, 429]:
                         self.refresh_identity()
-                        # Use curl_cffi session post, pass cookies explicitly
                         response = self.session.post(self.chat_endpoint, headers=self.headers, cookies=self.cookies, json=data, timeout=self.timeout, impersonate=self.fingerprint.get("browser_type", "chrome110"))
                         if not response.ok:
                             raise exceptions.FailedToGenerateResponseError(
@@ -276,40 +256,28 @@ class YEPCHAT(Provider):
                     raise exceptions.FailedToGenerateResponseError(
                         f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                     )
-
-                # ... existing non-stream response handling code ...
+                if raw:
+                    return response.text
                 response_data = response.json()
                 if 'choices' in response_data and len(response_data['choices']) > 0:
                     content = response_data['choices'][0].get('message', {}).get('content', '')
-
-                    # Check if the response contains a tool call
                     tool_response = self.conversation.handle_tool_response(content)
-
                     if tool_response["is_tool_call"]:
-                        # Process tool call
                         if tool_response["success"]:
-                            # Get the first tool call for simplicity
                             if "tool_calls" in tool_response and len(tool_response["tool_calls"]) > 0:
                                 tool_call = tool_response["tool_calls"][0]
                                 tool_name = tool_call.get("name", "unknown_tool")
                                 tool_result = tool_response["result"]
-
-                                # Update chat history with tool call
                                 self.conversation.update_chat_history_with_tool(prompt, tool_name, tool_result)
-
-                                # Return tool result
                                 return {"text": tool_result, "is_tool_call": True, "tool_name": tool_name}
-
-                            # If tool call processing failed
                             return {"text": tool_response["result"], "is_tool_call": True, "error": True}
                     else:
-                        # Normal response handling
                         self.conversation.update_chat_history(prompt, content)
                         return {"text": content}
                 else:
                     raise exceptions.FailedToGenerateResponseError("No response content found")
-            except CurlError as e: # Catch CurlError
-                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
             except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
@@ -321,6 +289,7 @@ class YEPCHAT(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False, # Added raw parameter
     ) -> Union[str, Generator[str, None, None]]:
         """
         Initiates a chat with the Yep API using the provided prompt.
@@ -335,19 +304,25 @@ class YEPCHAT(Provider):
         """
         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
             ):
-                yield self.get_message(response)
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
 
         def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
 
         return for_stream() if stream else for_non_stream()
 
@@ -361,29 +336,37 @@ class YEPCHAT(Provider):
         >>> ai.get_message(response)
         Extracts and returns the message content from the response.
         """
-        assert isinstance(response, dict)
-        return response["text"]
+        if isinstance(response, dict):
+            return response["text"]
+        elif isinstance(response, (str, bytes)):
+            return response
+        else:
+            raise TypeError(f"Unexpected response type: {type(response)}")
 
 
 if __name__ == "__main__":
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in YEPCHAT.AVAILABLE_MODELS:
-        try:
-            test_ai = YEPCHAT(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word")
-            response_text = response
+    # print("-" * 80)
+    # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    # print("-" * 80)
+
+    # for model in YEPCHAT.AVAILABLE_MODELS:
+    #     try:
+    #         test_ai = YEPCHAT(model=model, timeout=60)
+    #         response = test_ai.chat("Say 'Hello' in one word")
+    #         response_text = response
 
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Truncate response if too long
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"{model:<50} {'✗':<10} {str(e)}")
-
+    #         if response_text and len(response_text.strip()) > 0:
+    #             status = "✓"
+    #             # Truncate response if too long
+    #             display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+    #         else:
+    #             status = "✗"
+    #             display_text = "Empty or invalid response"
+    #         print(f"{model:<50} {status:<10} {display_text}")
+    #     except Exception as e:
+    #         print(f"{model:<50} {'✗':<10} {str(e)}")
+    ai = YEPCHAT(model="DeepSeek-R1-Distill-Qwen-32B", timeout=60)
+    response = ai.chat("Say 'Hello' in one word", raw=True, stream=True)
+    for chunk in response:
+
+        print(chunk, end='', flush=True)
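
For reference, the sanitize_stream call in yep.py strips the data: prefix, JSON-decodes each event, skips the [DONE] marker, and pulls the delta text with the lambda shown in the hunk. The same extraction applied by hand to a single OpenAI-style SSE line looks roughly like this (the sample payload is invented for illustration):

import json

line = 'data: {"choices": [{"delta": {"content": "Hello"}}]}'

# Same steps the diff configures sanitize_stream to perform: strip prefix, decode, extract.
payload = line.removeprefix("data:").strip()
if payload != "[DONE]":
    chunk = json.loads(payload)
    content = chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None
    print(content)  # -> "Hello"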
webscout/auth/__init__.py CHANGED
@@ -13,7 +13,18 @@ from .schemas import (
     UserResponse,
     HealthCheckResponse
 )
-from .server import create_app, run_api, start_server
+# Import server functions lazily to avoid module execution issues
+def create_app():
+    from .server import create_app as _create_app
+    return _create_app()
+
+def run_api(*args, **kwargs):
+    from .server import run_api as _run_api
+    return _run_api(*args, **kwargs)
+
+def start_server(*args, **kwargs):
+    from .server import start_server as _start_server
+    return _start_server(*args, **kwargs)
 from .routes import Api
 from .config import ServerConfig, AppConfig
 from .exceptions import APIError
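
The wrapper functions above defer importing webscout.auth.server until the first call, so importing webscout.auth no longer executes the server module at import time. A module-level __getattr__ (PEP 562) is a common alternative that achieves the same laziness while forwarding the original callables unchanged; a hypothetical variant for comparison, not what the package ships:

# hypothetical webscout/auth/__init__.py variant using PEP 562 lazy attribute access
def __getattr__(name):
    if name in ("create_app", "run_api", "start_server"):
        from . import server  # imported only on first attribute access
        return getattr(server, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")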