webscout: 8.3.1-py3-none-any.whl → 8.3.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (114)
  1. webscout/AIutel.py +180 -78
  2. webscout/Bing_search.py +417 -0
  3. webscout/Extra/gguf.py +706 -177
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/ExaChat.py +84 -58
  8. webscout/Provider/GeminiProxy.py +140 -0
  9. webscout/Provider/HeckAI.py +85 -80
  10. webscout/Provider/Jadve.py +56 -50
  11. webscout/Provider/MCPCore.py +78 -75
  12. webscout/Provider/MiniMax.py +207 -0
  13. webscout/Provider/Nemotron.py +41 -13
  14. webscout/Provider/Netwrck.py +34 -51
  15. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -4
  16. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  17. webscout/Provider/OPENAI/MiniMax.py +298 -0
  18. webscout/Provider/OPENAI/README.md +32 -29
  19. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  20. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  21. webscout/Provider/OPENAI/__init__.py +17 -1
  22. webscout/Provider/OPENAI/autoproxy.py +1067 -39
  23. webscout/Provider/OPENAI/base.py +17 -76
  24. webscout/Provider/OPENAI/deepinfra.py +42 -108
  25. webscout/Provider/OPENAI/e2b.py +0 -1
  26. webscout/Provider/OPENAI/flowith.py +179 -166
  27. webscout/Provider/OPENAI/friendli.py +233 -0
  28. webscout/Provider/OPENAI/mcpcore.py +109 -70
  29. webscout/Provider/OPENAI/monochat.py +329 -0
  30. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  31. webscout/Provider/OPENAI/scirachat.py +59 -51
  32. webscout/Provider/OPENAI/toolbaz.py +3 -9
  33. webscout/Provider/OPENAI/typegpt.py +1 -1
  34. webscout/Provider/OPENAI/utils.py +19 -42
  35. webscout/Provider/OPENAI/x0gpt.py +14 -2
  36. webscout/Provider/OPENAI/xenai.py +514 -0
  37. webscout/Provider/OPENAI/yep.py +8 -2
  38. webscout/Provider/OpenGPT.py +54 -32
  39. webscout/Provider/PI.py +58 -84
  40. webscout/Provider/StandardInput.py +32 -13
  41. webscout/Provider/TTI/README.md +9 -9
  42. webscout/Provider/TTI/__init__.py +3 -1
  43. webscout/Provider/TTI/aiarta.py +92 -78
  44. webscout/Provider/TTI/bing.py +231 -0
  45. webscout/Provider/TTI/infip.py +212 -0
  46. webscout/Provider/TTI/monochat.py +220 -0
  47. webscout/Provider/TTS/speechma.py +45 -39
  48. webscout/Provider/TeachAnything.py +11 -3
  49. webscout/Provider/TextPollinationsAI.py +78 -70
  50. webscout/Provider/TogetherAI.py +350 -0
  51. webscout/Provider/Venice.py +37 -46
  52. webscout/Provider/VercelAI.py +27 -24
  53. webscout/Provider/WiseCat.py +35 -35
  54. webscout/Provider/WrDoChat.py +22 -26
  55. webscout/Provider/WritingMate.py +26 -22
  56. webscout/Provider/XenAI.py +324 -0
  57. webscout/Provider/__init__.py +10 -5
  58. webscout/Provider/deepseek_assistant.py +378 -0
  59. webscout/Provider/granite.py +48 -57
  60. webscout/Provider/koala.py +51 -39
  61. webscout/Provider/learnfastai.py +49 -64
  62. webscout/Provider/llmchat.py +79 -93
  63. webscout/Provider/llmchatco.py +63 -78
  64. webscout/Provider/multichat.py +51 -40
  65. webscout/Provider/oivscode.py +1 -1
  66. webscout/Provider/scira_chat.py +159 -96
  67. webscout/Provider/scnet.py +13 -13
  68. webscout/Provider/searchchat.py +13 -13
  69. webscout/Provider/sonus.py +12 -11
  70. webscout/Provider/toolbaz.py +25 -8
  71. webscout/Provider/turboseek.py +41 -42
  72. webscout/Provider/typefully.py +27 -12
  73. webscout/Provider/typegpt.py +41 -46
  74. webscout/Provider/uncovr.py +55 -90
  75. webscout/Provider/x0gpt.py +33 -17
  76. webscout/Provider/yep.py +79 -96
  77. webscout/auth/__init__.py +55 -0
  78. webscout/auth/api_key_manager.py +189 -0
  79. webscout/auth/auth_system.py +100 -0
  80. webscout/auth/config.py +76 -0
  81. webscout/auth/database.py +400 -0
  82. webscout/auth/exceptions.py +67 -0
  83. webscout/auth/middleware.py +248 -0
  84. webscout/auth/models.py +130 -0
  85. webscout/auth/providers.py +279 -0
  86. webscout/auth/rate_limiter.py +254 -0
  87. webscout/auth/request_models.py +127 -0
  88. webscout/auth/request_processing.py +226 -0
  89. webscout/auth/routes.py +550 -0
  90. webscout/auth/schemas.py +103 -0
  91. webscout/auth/server.py +367 -0
  92. webscout/client.py +121 -70
  93. webscout/litagent/Readme.md +68 -55
  94. webscout/litagent/agent.py +99 -9
  95. webscout/scout/core/scout.py +104 -26
  96. webscout/scout/element.py +139 -18
  97. webscout/swiftcli/core/cli.py +14 -3
  98. webscout/swiftcli/decorators/output.py +59 -9
  99. webscout/update_checker.py +31 -49
  100. webscout/version.py +1 -1
  101. webscout/webscout_search.py +4 -12
  102. webscout/webscout_search_async.py +3 -10
  103. webscout/yep_search.py +2 -11
  104. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/METADATA +141 -99
  105. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/RECORD +109 -83
  106. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +1 -1
  107. webscout/Provider/HF_space/__init__.py +0 -0
  108. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  109. webscout/Provider/OPENAI/api.py +0 -1320
  110. webscout/Provider/TTI/fastflux.py +0 -233
  111. webscout/Provider/Writecream.py +0 -246
  112. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  113. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  114. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/HeckAI.py

@@ -57,7 +57,7 @@ class HeckAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "google/gemini-2.0-flash-001",
+        model: str = "google/gemini-2.5-flash-preview",
         language: str = "English"
     ):
         """
@@ -177,79 +177,73 @@ class HeckAI(Provider):
         def for_stream():
             streaming_text = "" # Initialize outside try block
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     self.url,
-                    # headers are set on the session
                     data=json.dumps(payload),
                     stream=True,
                     timeout=self.timeout,
-                    impersonate="chrome110" # Use a common impersonation profile
+                    impersonate="chrome110"
                 )
-                response.raise_for_status() # Check for HTTP errors
+                response.raise_for_status()

-                # Use sanitize_stream to process the stream
                 processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=1024), # Pass byte iterator
-                    intro_value="data: ", # Prefix to remove (note the space)
-                    to_json=False, # Content is text
-                    start_marker="data: [ANSWER_START]",
-                    end_marker="data: [ANSWER_DONE]",
-                    skip_markers=["data: [RELATE_Q_START]", "data: [RELATE_Q_DONE]", "data: [REASON_START]", "data: [REASON_DONE]"],
-                    yield_raw_on_error=True,
-                    strip_chars=" \n\r\t" # Strip whitespace characters from chunks
+                    data=response.iter_content(chunk_size=1024),
+                    intro_value="data: ",
+                    to_json=False,
+                    start_marker="data: [ANSWER_START]",
+                    end_marker="data: [ANSWER_DONE]",
+                    skip_markers=["data: [RELATE_Q_START]", "data: [RELATE_Q_DONE]", "data: [REASON_START]", "data: [REASON_DONE]"],
+                    yield_raw_on_error=True,
+                    strip_chars=" \n\r\t",
+                    raw=raw
                 )

                 for content_chunk in processed_stream:
-                    # content_chunk is the text between ANSWER_START and ANSWER_DONE
                     if content_chunk and isinstance(content_chunk, str):
-                        streaming_text += content_chunk
-                        yield dict(text=content_chunk) if not raw else content_chunk
+                        content_chunk = content_chunk.replace('\\\\', '\\').replace('\\"', '"')
+                    if raw:
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_text += content_chunk
+                            yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            streaming_text += content_chunk
+                            yield dict(text=content_chunk)

                 # Only update history if we received a valid response
                 if streaming_text:
-                    # Update history and previous answer after stream finishes
                     self.previous_answer = streaming_text
-                    # Convert to simple text before updating conversation
                     try:
-                        # Ensure content is valid before updating conversation
                         if streaming_text and isinstance(streaming_text, str):
-                            # Sanitize the content to ensure it's valid
                             sanitized_text = streaming_text.strip()
-                            if sanitized_text: # Only update if we have non-empty content
+                            if sanitized_text:
                                 self.conversation.update_chat_history(prompt, sanitized_text)
                     except Exception as e:
-                        # If conversation update fails, log but don't crash
                         print(f"Warning: Failed to update conversation history: {str(e)}")
-
-            except CurlError as e: # Catch CurlError
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
-            except Exception as e: # Catch other potential exceptions (like HTTPError)
+            except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e

-
         def for_non_stream():
-            # Aggregate the stream using the updated for_stream logic
             full_text = ""
             try:
-                # Ensure raw=False so for_stream yields dicts
                 for chunk_data in for_stream():
-                    if isinstance(chunk_data, dict) and "text" in chunk_data:
-                        full_text += chunk_data["text"]
-                    # Handle raw string case if raw=True was passed
-                    elif raw and isinstance(chunk_data, str):
-                        full_text += chunk_data
+                    if raw:
+                        if isinstance(chunk_data, str):
+                            chunk_data = chunk_data.replace('\\\\', '\\').replace('\\"', '"')
+                            full_text += chunk_data
+                    else:
+                        if isinstance(chunk_data, dict) and "text" in chunk_data:
+                            text = chunk_data["text"].replace('\\\\', '\\').replace('\\"', '"')
+                            full_text += text
             except Exception as e:
-                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
-                if not full_text:
-                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
-            # Return the final aggregated response dict or raw string
-            self.last_response = {"text": full_text} # Update last_response here
+                if not full_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+            self.last_response = {"text": full_text}
             return full_text if raw else self.last_response

-
         return for_stream() if stream else for_non_stream()

     @staticmethod
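
The hunk above threads the raw flag into sanitize_stream and normalizes escaped backslashes and quotes in each chunk; ask() now yields plain strings when raw=True and {"text": ...} dicts otherwise. A minimal sketch of consuming both shapes (hypothetical usage; the import path is an assumption):

    from webscout.Provider import HeckAI

    ai = HeckAI()

    # Default: each chunk is a dict with a "text" key.
    for chunk in ai.ask("Say 'Hello' in one word", stream=True, raw=False):
        print(chunk["text"], end="", flush=True)

    # raw=True: each chunk is a plain, already-unescaped string.
    for chunk in ai.ask("Say 'Hello' in one word", stream=True, raw=True):
        print(chunk, end="", flush=True)
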
@@ -266,15 +260,15 @@ class HeckAI(Provider):
         if isinstance(text, dict) and "text" in text:
             try:
                 text["text"] = text["text"].encode("latin1").decode("utf-8")
-                return text
+                return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
             except (UnicodeError, AttributeError) as e:
-                return text
+                return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
         elif isinstance(text, str):
             try:
                 return text.encode("latin1").decode("utf-8")
             except (UnicodeError, AttributeError) as e:
-                return text
-        return text
+                return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes
+        return text.replace('\\\\', '\\').replace('\\"', '"') # Handle escaped backslashes

     def chat(
         self,
@@ -282,6 +276,7 @@ class HeckAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """
         Sends a prompt to the HeckAI API and returns only the message text.
@@ -298,18 +293,23 @@ class HeckAI(Provider):
         def for_stream_chat():
             # ask() yields dicts or strings when streaming
             gen = self.ask(
-                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                prompt, stream=True, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
-            for response_dict in gen:
-                yield self.get_message(response_dict) # get_message expects dict
+            for response in gen:
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)

         def for_non_stream_chat():
             # ask() returns dict or str when not streaming
             response_data = self.ask(
-                prompt, stream=False, raw=False, # Ensure ask returns dict
+                prompt, stream=False, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
+            if raw:
+                return response_data if isinstance(response_data, str) else str(response_data)
             return self.get_message(response_data) # get_message expects dict

         return for_stream_chat() if stream else for_non_stream_chat()
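
chat() forwards the same raw flag, so the high-level API can also return unprocessed text; a brief sketch under the same import assumption:

    ai = HeckAI()
    # Streaming chat with raw=True yields plain strings rather than extracted messages.
    for chunk in ai.chat("tell me about humans", stream=True, raw=True):
        print(chunk, end="", flush=True)
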
@@ -338,38 +338,43 @@ class HeckAI(Provider):
         # Ensure text is a string
         text = response["text"]
         if not isinstance(text, str):
-            return str(text)
+            text = str(text)

-        return text
+        return text.replace('\\\\', '\\').replace('\\"', '"')

 if __name__ == "__main__":
-    # Ensure curl_cffi is installed
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in HeckAI.AVAILABLE_MODELS:
-        try:
-            test_ai = HeckAI(model=model, timeout=60)
-            # Use non-streaming mode first to avoid potential streaming issues
-            try:
-                response_text = test_ai.chat("Say 'Hello' in one word", stream=False)
-                print(f"\r{model:<50} {'✓':<10} {response_text.strip()[:50]}")
-            except Exception as e1:
-                # Fall back to streaming if non-streaming fails
-                print(f"\r{model:<50} {'Testing stream...':<10}", end="", flush=True)
-                response = test_ai.chat("Say 'Hello' in one word", stream=True)
-                response_text = ""
-                for chunk in response:
-                    if chunk and isinstance(chunk, str):
-                        response_text += chunk
+    # # Ensure curl_cffi is installed
+    # print("-" * 80)
+    # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    # print("-" * 80)
+
+    # for model in HeckAI.AVAILABLE_MODELS:
+    #     try:
+    #         test_ai = HeckAI(model=model, timeout=60)
+    #         # Use non-streaming mode first to avoid potential streaming issues
+    #         try:
+    #             response_text = test_ai.chat("Say 'Hello' in one word", stream=False)
+    #             print(f"\r{model:<50} {'✓':<10} {response_text.strip()[:50]}")
+    #         except Exception as e1:
+    #             # Fall back to streaming if non-streaming fails
+    #             print(f"\r{model:<50} {'Testing stream...':<10}", end="", flush=True)
+    #             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+    #             response_text = ""
+    #             for chunk in response:
+    #                 if chunk and isinstance(chunk, str):
+    #                     response_text += chunk

-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Truncate response if too long
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-                print(f"\r{model:<50} {status:<10} {display_text}")
-            else:
-                raise ValueError("Empty or invalid response")
-        except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+    #         if response_text and len(response_text.strip()) > 0:
+    #             status = "✓"
+    #             # Truncate response if too long
+    #             display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+    #             print(f"\r{model:<50} {status:<10} {display_text}")
+    #         else:
+    #             raise ValueError("Empty or invalid response")
+    #     except Exception as e:
+    #         print(f"\r{model:<50} {'✗':<10} {str(e)}")
+    from rich import print
+    ai = HeckAI()
+    response = ai.chat("tell me about humans", stream=True, raw=False)
+    for chunk in response:
+        print(chunk, end='', flush=True)
webscout/Provider/Jadve.py

@@ -170,16 +170,20 @@ class JadveOpenAI(Provider):
                     intro_value=None, # No simple prefix
                     to_json=False, # Content is text after extraction
                     content_extractor=self._jadve_extractor, # Use the specific extractor
-                    # end_marker="e:", # Add if 'e:' reliably marks the end
-                    yield_raw_on_error=True
+                    yield_raw_on_error=True,
+                    raw=raw
                 )

                 for content_chunk in processed_stream:
-                    # content_chunk is the string extracted by _jadve_extractor
-                    if content_chunk and isinstance(content_chunk, str):
-                        full_response_text += content_chunk
-                        resp = {"text": content_chunk}
-                        yield resp if not raw else content_chunk
+                    if raw:
+                        if content_chunk and isinstance(content_chunk, str):
+                            full_response_text += content_chunk
+                            yield content_chunk
+                    else:
+                        if content_chunk and isinstance(content_chunk, str):
+                            full_response_text += content_chunk
+                            resp = {"text": content_chunk}
+                            yield resp

                 # Update history after stream finishes
                 self.last_response = {"text": full_response_text}
@@ -191,30 +195,22 @@ class JadveOpenAI(Provider):
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e

-
         def for_non_stream():
-            # Aggregate the stream using the updated for_stream logic
             collected_text = ""
             try:
-                # Ensure raw=False so for_stream yields dicts
                 for chunk_data in for_stream():
-                    if isinstance(chunk_data, dict) and "text" in chunk_data:
-                        collected_text += chunk_data["text"]
-                    # Handle raw string case if raw=True was passed
-                    elif raw and isinstance(chunk_data, str):
-                        collected_text += chunk_data
+                    if raw:
+                        if isinstance(chunk_data, str):
+                            collected_text += chunk_data
+                    else:
+                        if isinstance(chunk_data, dict) and "text" in chunk_data:
+                            collected_text += chunk_data["text"]
             except Exception as e:
-                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
-                if not collected_text:
-                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
+                if not collected_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
             # last_response and history are updated within for_stream
-            # Return the final aggregated response dict or raw string
             return collected_text if raw else self.last_response

-
-        # Since the API endpoint suggests streaming, always call the stream generator.
-        # The non-stream wrapper will handle aggregation if stream=False.
         return for_stream() if stream else for_non_stream()

     def chat(
@@ -223,6 +219,7 @@ class JadveOpenAI(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """
         Generate a chat response (string).
@@ -232,25 +229,29 @@ class JadveOpenAI(Provider):
             stream (bool, optional): Flag for streaming response. Defaults to False.
             optimizer (str, optional): Prompt optimizer name. Defaults to None.
             conversationally (bool, optional): Flag for conversational optimization. Defaults to False.
+            raw (bool, optional): Return raw response. Defaults to False.
         Returns:
             str or generator: Generated response string or generator yielding response chunks.
         """
         def for_stream_chat():
-            # ask() yields dicts or strings when streaming
             gen = self.ask(
-                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                prompt, stream=True, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
-            for response_dict in gen:
-                yield self.get_message(response_dict) # get_message expects dict
+            for response in gen:
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)

         def for_non_stream_chat():
-            # ask() returns dict or str when not streaming
             response_data = self.ask(
-                prompt, stream=False, raw=False, # Ensure ask returns dict
+                prompt, stream=False, raw=raw,
                 optimizer=optimizer, conversationally=conversationally
             )
-            return self.get_message(response_data) # get_message expects dict
+            if raw:
+                return response_data if isinstance(response_data, str) else str(response_data)
+            return self.get_message(response_data)

         return for_stream_chat() if stream else for_non_stream_chat()
@@ -268,24 +269,29 @@ class JadveOpenAI(Provider):
         return response.get("text", "")

 if __name__ == "__main__":
-    # Ensure curl_cffi is installed
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in JadveOpenAI.AVAILABLE_MODELS:
-        try:
-            test_ai = JadveOpenAI(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word")
-            response_text = response
+    # # Ensure curl_cffi is installed
+    # print("-" * 80)
+    # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    # print("-" * 80)
+
+    # for model in JadveOpenAI.AVAILABLE_MODELS:
+    #     try:
+    #         test_ai = JadveOpenAI(model=model, timeout=60)
+    #         response = test_ai.chat("Say 'Hello' in one word")
+    #         response_text = response

-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Truncate response if too long
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"{model:<50} {'✗':<10} {str(e)}")
+    #         if response_text and len(response_text.strip()) > 0:
+    #             status = "✓"
+    #             # Truncate response if too long
+    #             display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+    #         else:
+    #             status = "✗"
+    #             display_text = "Empty or invalid response"
+    #         print(f"{model:<50} {status:<10} {display_text}")
+    #     except Exception as e:
+    #         print(f"{model:<50} {'✗':<10} {str(e)}")
+    from rich import print
+    ai = JadveOpenAI()
+    response = ai.chat("tell me about humans", stream=True, raw=False)
+    for chunk in response:
+        print(chunk, end='', flush=True)
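
JadveOpenAI gains the same raw plumbing as HeckAI above. A short sketch of the two non-streaming return shapes (hypothetical usage; assumes JadveOpenAI is re-exported from webscout.Provider):

    from webscout.Provider import JadveOpenAI

    ai = JadveOpenAI()
    print(ai.ask("Say 'Hello' in one word", raw=True))   # aggregated plain string
    print(ai.ask("Say 'Hello' in one word", raw=False))  # {"text": "..."} dict
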
webscout/Provider/MCPCore.py

@@ -1,5 +1,7 @@
 import json
 import uuid
+import random
+import string
 from typing import Any, Dict, Generator, Union

 # Use curl_cffi for requests
@@ -21,37 +23,37 @@ class MCPCore(Provider):

     # Add more models if known, starting with the one from the example
     AVAILABLE_MODELS = [
-        "google/gemma-7b-it",
-        "deepseek-ai/deepseek-coder-33b-instruct",
-        "deepseek-ai/DeepSeek-R1",
-        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
-        "deepseek-ai/DeepSeek-v3-0324",
-        "fixie-ai/ultravox-v0_4_1-llama-3_1-8b",
-        "meta-llama/Llama-3.3-70B-Instruct",
-        "meta-llama/Llama-4-Maverick-Instruct",
-        "mistralai/Mistral-7B-Instruct-v0.2",
-        "qwen-max-latest",
-        "qwen-plus-latest",
-        "qwen2.5-coder-32b-instruct",
-        "qwen-turbo-latest",
-        "qwen2.5-14b-instruct-1m",
-        "GLM-4-32B",
-        "Z1-32B",
-        "Z1-Rumination",
-        "arena-model",
-        "qvq-72b-preview-0310",
-        "qwq-32b",
-        "qwen3-235b-a22b",
-        "qwen3-30b-a3b",
-        "qwen3-32b",
-        "deepseek-flash",
-        "@cf/meta/llama-4-scout-17b-16e-instruct",
-        "任务专用",
+        "@cf/deepseek-ai/deepseek-math-7b-instruct",
+        "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
+        "@cf/defog/sqlcoder-7b-2",
+        "@cf/fblgit/una-cybertron-7b-v2-bf16",
+        "@cf/google/gemma-3-12b-it",
+        "@cf/meta/llama-2-7b-chat-int8",
+        "@hf/thebloke/llama-2-13b-chat-awq",
+        "@hf/thebloke/llamaguard-7b-awq",
+        "@hf/thebloke/mistral-7b-instruct-v0.1-awq",
+        "@hf/thebloke/neural-chat-7b-v3-1-awq",
+        "anthropic/claude-3.5-haiku",
+        "anthropic/claude-3.5-sonnet",
+        "anthropic/claude-3.7-sonnet",
+        "anthropic/claude-3.7-sonnet:thinking",
+        "anthropic/claude-opus-4",
+        "anthropic/claude-sonnet-4",
+        "openai/chatgpt-4o-latest",
+        "openai/gpt-3.5-turbo",
+        "openai/gpt-4.1",
+        "openai/gpt-4.1-mini",
+        "openai/gpt-4.1-nano",
+        "openai/gpt-4o-mini-search-preview",
+        "openai/gpt-4o-search-preview",
+        "openai/o1-pro",
+        "openai/o3-mini",
+        "sarvam-m",
+        "x-ai/grok-3-beta",
     ]

     def __init__(
         self,
-        cookies_path: str,
         is_conversation: bool = True,
         max_tokens: int = 2048,
         timeout: int = 60,
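
The required cookies_path argument is removed from the constructor, and the model catalogue is replaced with Cloudflare/OpenRouter-style identifiers. The new call shape, sketched under the assumption that MCPCore is re-exported from webscout.Provider:

    from webscout.Provider import MCPCore

    # No cookies.json needed; the constructor provisions its own bearer
    # token (see the _auto_fetch_token hunk below).
    ai = MCPCore(model="openai/gpt-4.1-mini", timeout=60)
    print(ai.chat("Say 'Hello' in one word"))
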
@@ -70,46 +72,22 @@ class MCPCore(Provider):

         self.api_endpoint = "https://chat.mcpcore.xyz/api/chat/completions"

-        # Cache the user-agent at the class level
-        if not hasattr(MCPCore, '_cached_user_agent'):
-            MCPCore._cached_user_agent = LitAgent().random()
         self.model = model
         self.system_prompt = system_prompt
-        self.cookies_path = cookies_path
-        self.cookie_string, self.token = self._load_cookies()

         # Initialize curl_cffi Session
         self.session = Session()

         # Set up headers based on the provided request
         self.headers = {
-            'authority': 'chat.mcpcore.xyz',
-            'accept': '*/*',
-            'accept-language': 'en-US,en;q=0.9,en-IN;q=0.8',
-            **({'authorization': f'Bearer {self.token}'} if self.token else {}),
-            'content-type': 'application/json',
-            'dnt': '1',
+            **LitAgent().generate_fingerprint(),
             'origin': 'https://chat.mcpcore.xyz',
             'referer': 'https://chat.mcpcore.xyz/',
-            'priority': 'u=1, i',
-            'sec-ch-ua': '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Windows"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'sec-gpc': '1',
-            'user-agent': self._cached_user_agent,
         }

         # Apply headers, proxies, and cookies to the session
         self.session.headers.update(self.headers)
         self.session.proxies = proxies
-        self.cookies = {
-            'token': self.token,
-        }
-        for name, value in self.cookies.items():
-            self.session.cookies.set(name, value, domain="chat.mcpcore.xyz")

         # Provider settings
         self.is_conversation = is_conversation
@@ -136,27 +114,54 @@ class MCPCore(Provider):
         )
         self.conversation.history_offset = history_offset

-    def _load_cookies(self) -> tuple[str, str]:
-        """Load cookies from a JSON file and build a cookie header string."""
+        # Token handling: always auto-fetch token, no cookies logic
+        self.token = self._auto_fetch_token()
+
+        # Set the Authorization header for the session
+        self.session.headers.update({
+            'authorization': f'Bearer {self.token}',
+        })
+
+    def _auto_fetch_token(self):
+        """Automatically fetch a token from the signup endpoint."""
+        session = Session()
+        def random_string(length=8):
+            return ''.join(random.choices(string.ascii_lowercase, k=length))
+        name = random_string(6)
+        email = f"{random_string(8)}@gmail.com"
+        password = email
+        profile_image_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAAAXNSR0IArs4c6QAAAkRJREFUeF7tmDFOw0AUBdcSiIaKM3CKHIQ7UHEISq5AiUTFHYC0XADoTRsJEZFEjhFIaYAim92fjGFS736/zOTZzjavl0d98oMh0CgE4+IriEJYPhQC86EQhdAIwPL4DFEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg2BCfkAIqwAA94KZ/EAAAAASUVORK5CYII="
+        payload = {
+            "name": name,
+            "email": email,
+            "password": password,
+            "profile_image_url": profile_image_url
+        }
+        headers = {
+            **LitAgent().generate_fingerprint(),
+            'origin': 'https://chat.mcpcore.xyz',
+            'referer': 'https://chat.mcpcore.xyz/auth',
+        }
         try:
-            with open(self.cookies_path, "r") as f:
-                cookies = json.load(f)
-            cookie_string = "; ".join(
-                f"{cookie['name']}={cookie['value']}" for cookie in cookies if 'name' in cookie and 'value' in cookie
-            )
-            token = next(
-                (cookie.get("value") for cookie in cookies if cookie.get("name") == "token"),
-                "",
-            )
-            return cookie_string, token
-        except FileNotFoundError:
-            raise exceptions.FailedToGenerateResponseError(
-                f"Error: Cookies file not found at {self.cookies_path}!"
-            )
-        except json.JSONDecodeError:
-            raise exceptions.FailedToGenerateResponseError(
-                f"Error: Invalid JSON format in cookies file: {self.cookies_path}!"
+            resp = session.post(
+                "https://chat.mcpcore.xyz/api/v1/auths/signup",
+                headers=headers,
+                json=payload,
+                timeout=30,
+                impersonate="chrome110"
             )
+            if resp.ok:
+                data = resp.json()
+                token = data.get("token")
+                if token:
+                    return token
+                # fallback: try to get from set-cookie
+                set_cookie = resp.headers.get("set-cookie", "")
+                if "token=" in set_cookie:
+                    return set_cookie.split("token=")[1].split(";")[0]
+            raise exceptions.FailedToGenerateResponseError(f"Failed to auto-fetch token: {resp.status_code} {resp.text}")
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Token auto-fetch failed: {e}")

     def ask(
         self,
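
_auto_fetch_token registers a throwaway account and keeps its bearer token for the session. A standalone sketch of the same flow, assuming the signup endpoint and response shape are exactly as in the hunk above:

    from curl_cffi.requests import Session

    # Hypothetical reproduction of the signup-token flow outside the class.
    # Payload trimmed: the shipped code also sends a profile_image_url.
    session = Session()
    resp = session.post(
        "https://chat.mcpcore.xyz/api/v1/auths/signup",
        json={"name": "demo", "email": "demo1234@gmail.com", "password": "demo1234@gmail.com"},
        timeout=30,
        impersonate="chrome110",
    )
    token = resp.json().get("token") if resp.ok else None
    print(token)
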
@@ -286,19 +291,17 @@ class MCPCore(Provider):
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response.get("text", "")

-# Example usage (remember to create a cookies.json file)
+# Example usage (no cookies file needed)
 if __name__ == "__main__":
     from rich import print

-    cookies_file_path = "cookies.json"
-
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)

     for model in MCPCore.AVAILABLE_MODELS:
         try:
-            test_ai = MCPCore(cookies_path=cookies_file_path, model=model, timeout=60)
+            test_ai = MCPCore(model=model, timeout=60)
             response = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
             # Accumulate the response text without printing in the loop