webscout 8.3-py3-none-any.whl → 8.3.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (120)
  1. webscout/AIauto.py +4 -4
  2. webscout/AIbase.py +61 -1
  3. webscout/AIutel.py +46 -53
  4. webscout/Bing_search.py +418 -0
  5. webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
  6. webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
  7. webscout/Extra/YTToolkit/ytapi/video.py +10 -10
  8. webscout/Extra/autocoder/autocoder_utiles.py +1 -1
  9. webscout/Extra/gguf.py +706 -177
  10. webscout/Litlogger/formats.py +9 -0
  11. webscout/Litlogger/handlers.py +18 -0
  12. webscout/Litlogger/logger.py +43 -1
  13. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  14. webscout/Provider/AISEARCH/scira_search.py +3 -2
  15. webscout/Provider/GeminiProxy.py +140 -0
  16. webscout/Provider/LambdaChat.py +7 -1
  17. webscout/Provider/MCPCore.py +78 -75
  18. webscout/Provider/OPENAI/BLACKBOXAI.py +1046 -1017
  19. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  20. webscout/Provider/OPENAI/Qwen3.py +303 -303
  21. webscout/Provider/OPENAI/README.md +5 -0
  22. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  23. webscout/Provider/OPENAI/TogetherAI.py +355 -0
  24. webscout/Provider/OPENAI/__init__.py +16 -1
  25. webscout/Provider/OPENAI/autoproxy.py +332 -0
  26. webscout/Provider/OPENAI/base.py +101 -14
  27. webscout/Provider/OPENAI/chatgpt.py +15 -2
  28. webscout/Provider/OPENAI/chatgptclone.py +14 -3
  29. webscout/Provider/OPENAI/deepinfra.py +339 -328
  30. webscout/Provider/OPENAI/e2b.py +295 -74
  31. webscout/Provider/OPENAI/mcpcore.py +109 -70
  32. webscout/Provider/OPENAI/opkfc.py +18 -6
  33. webscout/Provider/OPENAI/scirachat.py +59 -50
  34. webscout/Provider/OPENAI/toolbaz.py +2 -10
  35. webscout/Provider/OPENAI/writecream.py +166 -166
  36. webscout/Provider/OPENAI/x0gpt.py +367 -367
  37. webscout/Provider/OPENAI/xenai.py +514 -0
  38. webscout/Provider/OPENAI/yep.py +389 -383
  39. webscout/Provider/STT/__init__.py +3 -0
  40. webscout/Provider/STT/base.py +281 -0
  41. webscout/Provider/STT/elevenlabs.py +265 -0
  42. webscout/Provider/TTI/__init__.py +4 -1
  43. webscout/Provider/TTI/aiarta.py +399 -365
  44. webscout/Provider/TTI/base.py +74 -2
  45. webscout/Provider/TTI/bing.py +231 -0
  46. webscout/Provider/TTI/fastflux.py +63 -30
  47. webscout/Provider/TTI/gpt1image.py +149 -0
  48. webscout/Provider/TTI/imagen.py +196 -0
  49. webscout/Provider/TTI/magicstudio.py +60 -29
  50. webscout/Provider/TTI/piclumen.py +43 -32
  51. webscout/Provider/TTI/pixelmuse.py +232 -225
  52. webscout/Provider/TTI/pollinations.py +43 -32
  53. webscout/Provider/TTI/together.py +287 -0
  54. webscout/Provider/TTI/utils.py +2 -1
  55. webscout/Provider/TTS/README.md +1 -0
  56. webscout/Provider/TTS/__init__.py +2 -1
  57. webscout/Provider/TTS/freetts.py +140 -0
  58. webscout/Provider/TTS/speechma.py +45 -39
  59. webscout/Provider/TogetherAI.py +366 -0
  60. webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
  61. webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
  62. webscout/Provider/XenAI.py +324 -0
  63. webscout/Provider/__init__.py +8 -0
  64. webscout/Provider/deepseek_assistant.py +378 -0
  65. webscout/Provider/scira_chat.py +3 -2
  66. webscout/Provider/toolbaz.py +0 -1
  67. webscout/auth/__init__.py +44 -0
  68. webscout/auth/api_key_manager.py +189 -0
  69. webscout/auth/auth_system.py +100 -0
  70. webscout/auth/config.py +76 -0
  71. webscout/auth/database.py +400 -0
  72. webscout/auth/exceptions.py +67 -0
  73. webscout/auth/middleware.py +248 -0
  74. webscout/auth/models.py +130 -0
  75. webscout/auth/providers.py +257 -0
  76. webscout/auth/rate_limiter.py +254 -0
  77. webscout/auth/request_models.py +127 -0
  78. webscout/auth/request_processing.py +226 -0
  79. webscout/auth/routes.py +526 -0
  80. webscout/auth/schemas.py +103 -0
  81. webscout/auth/server.py +312 -0
  82. webscout/auth/static/favicon.svg +11 -0
  83. webscout/auth/swagger_ui.py +203 -0
  84. webscout/auth/templates/components/authentication.html +237 -0
  85. webscout/auth/templates/components/base.html +103 -0
  86. webscout/auth/templates/components/endpoints.html +750 -0
  87. webscout/auth/templates/components/examples.html +491 -0
  88. webscout/auth/templates/components/footer.html +75 -0
  89. webscout/auth/templates/components/header.html +27 -0
  90. webscout/auth/templates/components/models.html +286 -0
  91. webscout/auth/templates/components/navigation.html +70 -0
  92. webscout/auth/templates/static/api.js +455 -0
  93. webscout/auth/templates/static/icons.js +168 -0
  94. webscout/auth/templates/static/main.js +784 -0
  95. webscout/auth/templates/static/particles.js +201 -0
  96. webscout/auth/templates/static/styles.css +3353 -0
  97. webscout/auth/templates/static/ui.js +374 -0
  98. webscout/auth/templates/swagger_ui.html +170 -0
  99. webscout/client.py +49 -3
  100. webscout/litagent/Readme.md +12 -3
  101. webscout/litagent/agent.py +99 -62
  102. webscout/scout/core/scout.py +104 -26
  103. webscout/scout/element.py +139 -18
  104. webscout/swiftcli/core/cli.py +14 -3
  105. webscout/swiftcli/decorators/output.py +59 -9
  106. webscout/update_checker.py +31 -49
  107. webscout/version.py +1 -1
  108. webscout/webscout_search.py +4 -12
  109. webscout/webscout_search_async.py +3 -10
  110. webscout/yep_search.py +2 -11
  111. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/METADATA +41 -11
  112. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/RECORD +116 -68
  113. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/entry_points.txt +1 -1
  114. webscout/Provider/HF_space/__init__.py +0 -0
  115. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  116. webscout/Provider/OPENAI/api.py +0 -1035
  117. webscout/Provider/TTI/artbit.py +0 -0
  118. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/WHEEL +0 -0
  119. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/licenses/LICENSE.md +0 -0
  120. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,13 @@
 DEFAULT_FORMAT = "{time} | {level} | {name} | {message}"
 
+SIMPLE_FORMAT = "{level}: {message}"
+
+DETAILED_FORMAT = "{time} | {level} | {name} | {message} | Thread: {thread} | Process: {process}"
+
+JSON_FORMAT = '{{"time": "{time}", "level": "{level}", "name": "{name}", "message": "{message}"}}'
+
 class LogFormat:
     DEFAULT = DEFAULT_FORMAT
+    SIMPLE = SIMPLE_FORMAT
+    DETAILED = DETAILED_FORMAT
+    JSON = JSON_FORMAT
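Note: the new templates are ordinary str.format strings, and the doubled braces in JSON_FORMAT render as literal braces. A minimal standalone check (stdlib only; the values are made up for illustration):

    JSON_FORMAT = '{{"time": "{time}", "level": "{level}", "name": "{name}", "message": "{message}"}}'
    # doubled braces escape to literal { } in str.format output
    print(JSON_FORMAT.format(time="2025-01-01 00:00:00", level="INFO",
                             name="app", message="hello"))
    # -> {"time": "2025-01-01 00:00:00", "level": "INFO", "name": "app", "message": "hello"}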
@@ -101,3 +101,21 @@ class TCPHandler(Handler):
             return
         with socket.create_connection((self.host, self.port), timeout=5) as sock:
             sock.sendall(message.encode() + b"\n")
+
+class JSONFileHandler(FileHandler):
+    def __init__(self, path: str, level: LogLevel = LogLevel.DEBUG, max_bytes: int = 0, backups: int = 0):
+        super().__init__(path, level, max_bytes, backups)
+
+    def emit(self, message: str, level: LogLevel):
+        # Expect message to be a JSON string or dict
+        if level < self.level:
+            return
+        import json
+        if isinstance(message, dict):
+            log_entry = json.dumps(message)
+        else:
+            log_entry = message
+        self._file.write(log_entry + "\n")
+        self._file.flush()
+        if self.max_bytes and self._file.tell() >= self.max_bytes:
+            self._rotate()
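A usage sketch for the new JSONFileHandler, using only what this hunk shows (emit's signature plus the inherited constructor); the import paths are assumptions:

    from webscout.Litlogger.handlers import JSONFileHandler  # assumed import path
    from webscout.Litlogger import LogLevel                  # assumed import path

    handler = JSONFileHandler("app.log.jsonl", level=LogLevel.INFO)
    # dicts are serialized via json.dumps; strings are written as-is
    handler.emit({"level": "INFO", "message": "started"}, LogLevel.INFO)

Note that emit annotates message as str but explicitly branches on dict, so both payload types are accepted at runtime.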
@@ -16,16 +16,58 @@ class Logger:
         handlers: Optional[List[Handler]] = None,
         fmt: str = LogFormat.DEFAULT,  # <--- use LogFormat.DEFAULT
         async_mode: bool = False,
+        include_context: bool = False,  # New flag to include thread/process info
     ):
+        import threading
+        import multiprocessing
+
         self.name = name
         self.level = level
         self.format = fmt
         self.async_mode = async_mode
+        self.include_context = include_context
         self.handlers = handlers or [ConsoleHandler()]
+        self._thread = threading
+        self._multiprocessing = multiprocessing
 
     def _format(self, level: LogLevel, message: str) -> str:
         now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-        return self.format.format(time=now, level=level.name, name=self.name, message=message)
+        if self.include_context:
+            thread_name = self._thread.current_thread().name
+            process_id = self._multiprocessing.current_process().pid
+            # Check if format is JSON format
+            if self.format.strip().startswith('{') and self.format.strip().endswith('}'):
+                # Format as JSON string with extra fields
+                return self.format.format(
+                    time=now,
+                    level=level.name,
+                    name=self.name,
+                    message=message,
+                    thread=thread_name,
+                    process=process_id
+                )
+            else:
+                # For non-JSON formats, add thread and process info if placeholders exist
+                try:
+                    return self.format.format(
+                        time=now,
+                        level=level.name,
+                        name=self.name,
+                        message=message,
+                        thread=thread_name,
+                        process=process_id
+                    )
+                except KeyError:
+                    # If thread/process placeholders not in format, append them manually
+                    base = self.format.format(time=now, level=level.name, name=self.name, message=message)
+                    return f"{base} | Thread: {thread_name} | Process: {process_id}"
+        else:
+            return self.format.format(time=now, level=level.name, name=self.name, message=message)
+
+    def set_format(self, fmt: str, include_context: bool = False):
+        """Dynamically change the log format and context inclusion."""
+        self.format = fmt
+        self.include_context = include_context
 
     def _should_log(self, level: LogLevel) -> bool:
         return level >= self.level
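A sketch of how the new include_context flag plays with the formats (the import path is an assumption):

    from webscout.Litlogger import Logger, LogFormat  # assumed import path

    log = Logger(name="worker", fmt=LogFormat.DETAILED, include_context=True)
    # DETAILED names {thread} and {process}, so both render inline.

    log.set_format(LogFormat.DEFAULT, include_context=True)
    # str.format silently ignores surplus keyword arguments, so this call
    # still succeeds even though DEFAULT has no {thread}/{process}
    # placeholders; the KeyError fallback that appends
    # "| Thread: ... | Process: ..." is therefore effectively unreachable.

One caveat worth noting: because str.format ignores unused kwargs, context only appears when the active template actually names the {thread}/{process} placeholders, as DETAILED does.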
@@ -201,12 +201,12 @@ class Genspark(AISearch):
             json={},
             stream=True,
             timeout=self.timeout,
-        ) as SearchResponse:
-            if not SearchResponse.ok:
+        ) as resp:
+            if not resp.ok:
                 raise exceptions.APIConnectionError(
-                    f"Failed to generate SearchResponse - ({SearchResponse.status_code}, {SearchResponse.reason}) - {SearchResponse.text}"
+                    f"Failed to generate SearchResponse - ({resp.status_code}, {resp.reason}) - {resp.text}"
                 )
-            for line in SearchResponse.iter_lines(decode_unicode=True):
+            for line in resp.iter_lines(decode_unicode=True):
                 if not line or not line.startswith("data: "):
                     continue
                 try:
@@ -287,7 +287,7 @@ class Genspark(AISearch):
                             yield processed_event_payload
                     except json.JSONDecodeError:
                         continue
-        except cloudscraper.exceptions as e:
+        except cloudscraper.exceptions.CloudflareException as e:
             raise exceptions.APIConnectionError(f"Request failed due to Cloudscraper issue: {e}")
         except requests.exceptions.RequestException as e:
             raise exceptions.APIConnectionError(f"Request failed: {e}")
@@ -315,8 +315,8 @@ if __name__ == "__main__":
     from rich import print
     ai = Genspark()
     try:
-        SearchResponse = ai.search(input(">>> "), stream=True, raw=False)
-        for chunk in SearchResponse:
+        search_result_stream = ai.search(input(">>> "), stream=True, raw=False)
+        for chunk in search_result_stream:
             print(chunk, end="", flush=True)
     except KeyboardInterrupt:
         print("\nSearch interrupted by user.")
@@ -45,12 +45,13 @@ class Scira(AISearch):
     AVAILABLE_MODELS = {
         "scira-default": "Grok3-mini",  # thinking model
         "scira-grok-3": "Grok3",
-        "scira-anthropic": "Sonnet 3.7 thinking",
+        "scira-anthropic": "Claude 4 Sonnet",
+        "scira-anthropic-thinking": "Claude 4 Sonnet Thinking",  # thinking model
         "scira-vision": "Grok2-Vision",  # vision model
         "scira-4o": "GPT4o",
         "scira-qwq": "QWQ-32B",
         "scira-o4-mini": "o4-mini",
-        "scira-google": "gemini 2.5 flash",
+        "scira-google": "gemini 2.5 flash Thinking",  # thinking model
         "scira-google-pro": "gemini 2.5 pro",
         "scira-llama-4": "llama 4 Maverick",
     }
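A hypothetical call selecting one of the new thinking routes, assuming Scira's constructor accepts a model key and its search() streams like Genspark's above (neither is shown in this hunk):

    from webscout.Provider.AISEARCH.scira_search import Scira  # assumed import path

    ai = Scira(model="scira-anthropic-thinking")  # Claude 4 Sonnet Thinking
    for chunk in ai.search("latest LLM benchmarks", stream=True):
        print(chunk, end="", flush=True)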
@@ -0,0 +1,140 @@
+from typing import Any, Dict, Optional, Union, Generator
+import requests
+import base64
+from webscout.litagent import LitAgent
+from webscout.AIutel import Optimizers, AwesomePrompts
+from webscout.AIutel import Conversation
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+class GeminiProxy(Provider):
+    """
+    GeminiProxy is a provider class for interacting with the Gemini API via a proxy endpoint.
+    """
+    AVAILABLE_MODELS = [
+        "gemini-2.0-flash-lite",
+        "gemini-2.0-flash",
+        "gemini-2.5-pro-preview-06-05",
+        "gemini-2.5-pro-preview-05-06",
+        "gemini-2.5-flash-preview-04-17",
+        "gemini-2.5-flash-preview-05-20",
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gemini-2.0-flash-lite",
+        system_prompt: str = "You are a helpful assistant.",
+        browser: str = "chrome"
+    ):
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+        self.base_url = "https://us-central1-infinite-chain-295909.cloudfunctions.net/gemini-proxy-staging-v1"
+        self.agent = LitAgent()
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+        self.headers = self.fingerprint.copy()
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def get_image(self, img_url):
+        try:
+            response = self.session.get(img_url, stream=True, timeout=self.timeout)
+            response.raise_for_status()
+            mime_type = response.headers.get("content-type", "application/octet-stream")
+            data = base64.b64encode(response.content).decode("utf-8")
+            return {"mime_type": mime_type, "data": data}
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Error fetching image: {e}")
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        img_url: Optional[str] = None,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+        parts = []
+        if img_url:
+            parts.append({"inline_data": self.get_image(img_url)})
+        parts.append({"text": conversation_prompt})
+        request_data = {
+            "model": self.model,
+            "contents": [{"parts": parts}]
+        }
+        def for_non_stream():
+            try:
+                response = self.session.post(self.base_url, json=request_data, headers=self.headers, timeout=self.timeout)
+                response.raise_for_status()
+                data = response.json()
+                self.last_response = data
+                self.conversation.update_chat_history(prompt, self.get_message(data))
+                return data
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Error during chat request: {e}")
+        # Gemini proxy does not support streaming, so only non-stream
+        return for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        img_url: Optional[str] = None,
+    ) -> str:
+        data = self.ask(prompt, stream=stream, optimizer=optimizer, conversationally=conversationally, img_url=img_url)
+        return self.get_message(data)
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        try:
+            return response['candidates'][0]['content']['parts'][0]['text']
+        except Exception:
+            return str(response)
+
+if __name__ == "__main__":
+    ai = GeminiProxy(timeout=30, model="gemini-2.5-flash-preview-05-20")
+    response = ai.chat("write a poem about AI")
+    print(response)
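Beyond the text-only __main__ demo, the new provider's ask() also takes an img_url, which get_image() downloads and inlines as base64 (both shown above). A short sketch using only the methods defined in this file; the image URL is a placeholder:

    ai = GeminiProxy(model="gemini-2.0-flash")
    data = ai.ask("Describe this image in one sentence",
                  img_url="https://example.com/photo.jpg")  # placeholder URL
    print(ai.get_message(data))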
@@ -23,11 +23,17 @@ class LambdaChat(Provider):
         "deepseek-llama3.3-70b",
         "apriel-5b-instruct",
         "deepseek-r1",
+        "deepseek-v3-0324",
+        "deepseek-r1-0528",
         "hermes-3-llama-3.1-405b-fp8",
         "llama3.1-nemotron-70b-instruct",
         "lfm-40b",
         "llama3.3-70b-instruct-fp8",
-        "qwen25-coder-32b-instruct"
+        "qwen25-coder-32b-instruct",
+        "qwen3-32b-fp8",
+        "llama-4-maverick-70b-128e-instruct-fp8",
+        "llama-4-scout-17b-16e-instruct"
     ]
 
     def __init__(
@@ -1,5 +1,7 @@
 import json
 import uuid
+import random
+import string
 from typing import Any, Dict, Generator, Union
 
 # Use curl_cffi for requests
@@ -21,37 +23,37 @@ class MCPCore(Provider):
 
     # Add more models if known, starting with the one from the example
     AVAILABLE_MODELS = [
-        "google/gemma-7b-it",
-        "deepseek-ai/deepseek-coder-33b-instruct",
-        "deepseek-ai/DeepSeek-R1",
-        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
-        "deepseek-ai/DeepSeek-v3-0324",
-        "fixie-ai/ultravox-v0_4_1-llama-3_1-8b",
-        "meta-llama/Llama-3.3-70B-Instruct",
-        "meta-llama/Llama-4-Maverick-Instruct",
-        "mistralai/Mistral-7B-Instruct-v0.2",
-        "qwen-max-latest",
-        "qwen-plus-latest",
-        "qwen2.5-coder-32b-instruct",
-        "qwen-turbo-latest",
-        "qwen2.5-14b-instruct-1m",
-        "GLM-4-32B",
-        "Z1-32B",
-        "Z1-Rumination",
-        "arena-model",
-        "qvq-72b-preview-0310",
-        "qwq-32b",
-        "qwen3-235b-a22b",
-        "qwen3-30b-a3b",
-        "qwen3-32b",
-        "deepseek-flash",
-        "@cf/meta/llama-4-scout-17b-16e-instruct",
-        "任务专用",
+        "@cf/deepseek-ai/deepseek-math-7b-instruct",
+        "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
+        "@cf/defog/sqlcoder-7b-2",
+        "@cf/fblgit/una-cybertron-7b-v2-bf16",
+        "@cf/google/gemma-3-12b-it",
+        "@cf/meta/llama-2-7b-chat-int8",
+        "@hf/thebloke/llama-2-13b-chat-awq",
+        "@hf/thebloke/llamaguard-7b-awq",
+        "@hf/thebloke/mistral-7b-instruct-v0.1-awq",
+        "@hf/thebloke/neural-chat-7b-v3-1-awq",
+        "anthropic/claude-3.5-haiku",
+        "anthropic/claude-3.5-sonnet",
+        "anthropic/claude-3.7-sonnet",
+        "anthropic/claude-3.7-sonnet:thinking",
+        "anthropic/claude-opus-4",
+        "anthropic/claude-sonnet-4",
+        "openai/chatgpt-4o-latest",
+        "openai/gpt-3.5-turbo",
+        "openai/gpt-4.1",
+        "openai/gpt-4.1-mini",
+        "openai/gpt-4.1-nano",
+        "openai/gpt-4o-mini-search-preview",
+        "openai/gpt-4o-search-preview",
+        "openai/o1-pro",
+        "openai/o3-mini",
+        "sarvam-m",
+        "x-ai/grok-3-beta",
     ]
 
     def __init__(
         self,
-        cookies_path: str,
         is_conversation: bool = True,
         max_tokens: int = 2048,
         timeout: int = 60,
@@ -70,46 +72,22 @@ class MCPCore(Provider):
 
         self.api_endpoint = "https://chat.mcpcore.xyz/api/chat/completions"
 
-        # Cache the user-agent at the class level
-        if not hasattr(MCPCore, '_cached_user_agent'):
-            MCPCore._cached_user_agent = LitAgent().random()
         self.model = model
         self.system_prompt = system_prompt
-        self.cookies_path = cookies_path
-        self.cookie_string, self.token = self._load_cookies()
 
         # Initialize curl_cffi Session
         self.session = Session()
 
         # Set up headers based on the provided request
         self.headers = {
-            'authority': 'chat.mcpcore.xyz',
-            'accept': '*/*',
-            'accept-language': 'en-US,en;q=0.9,en-IN;q=0.8',
-            **({'authorization': f'Bearer {self.token}'} if self.token else {}),
-            'content-type': 'application/json',
-            'dnt': '1',
+            **LitAgent().generate_fingerprint(),
             'origin': 'https://chat.mcpcore.xyz',
             'referer': 'https://chat.mcpcore.xyz/',
-            'priority': 'u=1, i',
-            'sec-ch-ua': '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Windows"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'sec-gpc': '1',
-            'user-agent': self._cached_user_agent,
         }
 
         # Apply headers, proxies, and cookies to the session
         self.session.headers.update(self.headers)
         self.session.proxies = proxies
-        self.cookies = {
-            'token': self.token,
-        }
-        for name, value in self.cookies.items():
-            self.session.cookies.set(name, value, domain="chat.mcpcore.xyz")
 
         # Provider settings
         self.is_conversation = is_conversation
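The rewritten header setup leans on LitAgent().generate_fingerprint(), which (as GeminiProxy above also does) returns a dict of browser-like HTTP headers that can be spread straight into a request profile. A sketch; the exact keys of the returned dict are not shown in this diff:

    from webscout.litagent import LitAgent

    fp = LitAgent().generate_fingerprint()  # header-style dict (user-agent etc.)
    headers = {
        **fp,
        'origin': 'https://chat.mcpcore.xyz',
        'referer': 'https://chat.mcpcore.xyz/',
    }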
@@ -136,27 +114,54 @@ class MCPCore(Provider):
         )
         self.conversation.history_offset = history_offset
 
-    def _load_cookies(self) -> tuple[str, str]:
-        """Load cookies from a JSON file and build a cookie header string."""
+        # Token handling: always auto-fetch token, no cookies logic
+        self.token = self._auto_fetch_token()
+
+        # Set the Authorization header for the session
+        self.session.headers.update({
+            'authorization': f'Bearer {self.token}',
+        })
+
+    def _auto_fetch_token(self):
+        """Automatically fetch a token from the signup endpoint."""
+        session = Session()
+        def random_string(length=8):
+            return ''.join(random.choices(string.ascii_lowercase, k=length))
+        name = random_string(6)
+        email = f"{random_string(8)}@gmail.com"
+        password = email
+        profile_image_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAAAXNSR0IArs4c6QAAAkRJREFUeF7tmDFOw0AUBdcSiIaKM3CKHIQ7UHEISq5AiUTFHYC0XADoTRsJEZFEjhFIaYAim92fjGFS736/zOTZzjavl0d98oMh0CgE4+IriEJYPhQC86EQhdAIwPL4DFEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg2BCfkAIqwAA94KZ/EAAAAASUVORK5CYII="
+        payload = {
+            "name": name,
+            "email": email,
+            "password": password,
+            "profile_image_url": profile_image_url
+        }
+        headers = {
+            **LitAgent().generate_fingerprint(),
+            'origin': 'https://chat.mcpcore.xyz',
+            'referer': 'https://chat.mcpcore.xyz/auth',
+        }
         try:
-            with open(self.cookies_path, "r") as f:
-                cookies = json.load(f)
-            cookie_string = "; ".join(
-                f"{cookie['name']}={cookie['value']}" for cookie in cookies if 'name' in cookie and 'value' in cookie
-            )
-            token = next(
-                (cookie.get("value") for cookie in cookies if cookie.get("name") == "token"),
-                "",
-            )
-            return cookie_string, token
-        except FileNotFoundError:
-            raise exceptions.FailedToGenerateResponseError(
-                f"Error: Cookies file not found at {self.cookies_path}!"
-            )
-        except json.JSONDecodeError:
-            raise exceptions.FailedToGenerateResponseError(
-                f"Error: Invalid JSON format in cookies file: {self.cookies_path}!"
+            resp = session.post(
+                "https://chat.mcpcore.xyz/api/v1/auths/signup",
+                headers=headers,
+                json=payload,
+                timeout=30,
+                impersonate="chrome110"
             )
+            if resp.ok:
+                data = resp.json()
+                token = data.get("token")
+                if token:
+                    return token
+                # fallback: try to get from set-cookie
+                set_cookie = resp.headers.get("set-cookie", "")
+                if "token=" in set_cookie:
+                    return set_cookie.split("token=")[1].split(";")[0]
+            raise exceptions.FailedToGenerateResponseError(f"Failed to auto-fetch token: {resp.status_code} {resp.text}")
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Token auto-fetch failed: {e}")
 
     def ask(
         self,
@@ -286,19 +291,17 @@ class MCPCore(Provider):
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response.get("text", "")
 
-# Example usage (remember to create a cookies.json file)
+# Example usage (no cookies file needed)
 if __name__ == "__main__":
     from rich import print
 
-    cookies_file_path = "cookies.json"
-
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
 
     for model in MCPCore.AVAILABLE_MODELS:
         try:
-            test_ai = MCPCore(cookies_path=cookies_file_path, model=model, timeout=60)
+            test_ai = MCPCore(model=model, timeout=60)
             response = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
             # Accumulate the response text without printing in the loop