webscout 8.3.1__py3-none-any.whl → 8.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout has been flagged as potentially problematic.
Files changed (114)
  1. webscout/AIutel.py +180 -78
  2. webscout/Bing_search.py +417 -0
  3. webscout/Extra/gguf.py +706 -177
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/ExaChat.py +84 -58
  8. webscout/Provider/GeminiProxy.py +140 -0
  9. webscout/Provider/HeckAI.py +85 -80
  10. webscout/Provider/Jadve.py +56 -50
  11. webscout/Provider/MCPCore.py +78 -75
  12. webscout/Provider/MiniMax.py +207 -0
  13. webscout/Provider/Nemotron.py +41 -13
  14. webscout/Provider/Netwrck.py +34 -51
  15. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -4
  16. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  17. webscout/Provider/OPENAI/MiniMax.py +298 -0
  18. webscout/Provider/OPENAI/README.md +32 -29
  19. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  20. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  21. webscout/Provider/OPENAI/__init__.py +17 -1
  22. webscout/Provider/OPENAI/autoproxy.py +1067 -39
  23. webscout/Provider/OPENAI/base.py +17 -76
  24. webscout/Provider/OPENAI/deepinfra.py +42 -108
  25. webscout/Provider/OPENAI/e2b.py +0 -1
  26. webscout/Provider/OPENAI/flowith.py +179 -166
  27. webscout/Provider/OPENAI/friendli.py +233 -0
  28. webscout/Provider/OPENAI/mcpcore.py +109 -70
  29. webscout/Provider/OPENAI/monochat.py +329 -0
  30. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  31. webscout/Provider/OPENAI/scirachat.py +59 -51
  32. webscout/Provider/OPENAI/toolbaz.py +3 -9
  33. webscout/Provider/OPENAI/typegpt.py +1 -1
  34. webscout/Provider/OPENAI/utils.py +19 -42
  35. webscout/Provider/OPENAI/x0gpt.py +14 -2
  36. webscout/Provider/OPENAI/xenai.py +514 -0
  37. webscout/Provider/OPENAI/yep.py +8 -2
  38. webscout/Provider/OpenGPT.py +54 -32
  39. webscout/Provider/PI.py +58 -84
  40. webscout/Provider/StandardInput.py +32 -13
  41. webscout/Provider/TTI/README.md +9 -9
  42. webscout/Provider/TTI/__init__.py +3 -1
  43. webscout/Provider/TTI/aiarta.py +92 -78
  44. webscout/Provider/TTI/bing.py +231 -0
  45. webscout/Provider/TTI/infip.py +212 -0
  46. webscout/Provider/TTI/monochat.py +220 -0
  47. webscout/Provider/TTS/speechma.py +45 -39
  48. webscout/Provider/TeachAnything.py +11 -3
  49. webscout/Provider/TextPollinationsAI.py +78 -70
  50. webscout/Provider/TogetherAI.py +350 -0
  51. webscout/Provider/Venice.py +37 -46
  52. webscout/Provider/VercelAI.py +27 -24
  53. webscout/Provider/WiseCat.py +35 -35
  54. webscout/Provider/WrDoChat.py +22 -26
  55. webscout/Provider/WritingMate.py +26 -22
  56. webscout/Provider/XenAI.py +324 -0
  57. webscout/Provider/__init__.py +10 -5
  58. webscout/Provider/deepseek_assistant.py +378 -0
  59. webscout/Provider/granite.py +48 -57
  60. webscout/Provider/koala.py +51 -39
  61. webscout/Provider/learnfastai.py +49 -64
  62. webscout/Provider/llmchat.py +79 -93
  63. webscout/Provider/llmchatco.py +63 -78
  64. webscout/Provider/multichat.py +51 -40
  65. webscout/Provider/oivscode.py +1 -1
  66. webscout/Provider/scira_chat.py +159 -96
  67. webscout/Provider/scnet.py +13 -13
  68. webscout/Provider/searchchat.py +13 -13
  69. webscout/Provider/sonus.py +12 -11
  70. webscout/Provider/toolbaz.py +25 -8
  71. webscout/Provider/turboseek.py +41 -42
  72. webscout/Provider/typefully.py +27 -12
  73. webscout/Provider/typegpt.py +41 -46
  74. webscout/Provider/uncovr.py +55 -90
  75. webscout/Provider/x0gpt.py +33 -17
  76. webscout/Provider/yep.py +79 -96
  77. webscout/auth/__init__.py +55 -0
  78. webscout/auth/api_key_manager.py +189 -0
  79. webscout/auth/auth_system.py +100 -0
  80. webscout/auth/config.py +76 -0
  81. webscout/auth/database.py +400 -0
  82. webscout/auth/exceptions.py +67 -0
  83. webscout/auth/middleware.py +248 -0
  84. webscout/auth/models.py +130 -0
  85. webscout/auth/providers.py +279 -0
  86. webscout/auth/rate_limiter.py +254 -0
  87. webscout/auth/request_models.py +127 -0
  88. webscout/auth/request_processing.py +226 -0
  89. webscout/auth/routes.py +550 -0
  90. webscout/auth/schemas.py +103 -0
  91. webscout/auth/server.py +367 -0
  92. webscout/client.py +121 -70
  93. webscout/litagent/Readme.md +68 -55
  94. webscout/litagent/agent.py +99 -9
  95. webscout/scout/core/scout.py +104 -26
  96. webscout/scout/element.py +139 -18
  97. webscout/swiftcli/core/cli.py +14 -3
  98. webscout/swiftcli/decorators/output.py +59 -9
  99. webscout/update_checker.py +31 -49
  100. webscout/version.py +1 -1
  101. webscout/webscout_search.py +4 -12
  102. webscout/webscout_search_async.py +3 -10
  103. webscout/yep_search.py +2 -11
  104. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/METADATA +141 -99
  105. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/RECORD +109 -83
  106. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +1 -1
  107. webscout/Provider/HF_space/__init__.py +0 -0
  108. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  109. webscout/Provider/OPENAI/api.py +0 -1320
  110. webscout/Provider/TTI/fastflux.py +0 -233
  111. webscout/Provider/Writecream.py +0 -246
  112. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  113. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  114. {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/mcpcore.py +109 -70

@@ -1,6 +1,8 @@
 import time
 import uuid
 import json
+import random
+import string
 from typing import List, Dict, Optional, Union, Generator, Any
 
 # Use curl_cffi for requests
@@ -8,8 +10,8 @@ from curl_cffi.requests import Session
 from curl_cffi import CurlError
 
 # Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage
 )
@@ -274,11 +276,8 @@ class MCPCore(OpenAICompatibleProvider):
     """
     OpenAI-compatible client for the MCPCore API (chat.mcpcore.xyz).
 
-    Requires cookies to be stored in a JSON file (e.g., 'cookies.json').
-    The JSON file should contain a list of cookie objects, including one named 'token'.
-
     Usage:
-        client = MCPCore(cookies_path="path/to/your/cookies.json")
+        client = MCPCore()
         response = client.chat.completions.create(
             model="google/gemma-7b-it",
             messages=[{"role": "user", "content": "Hello!"}]
@@ -286,71 +285,96 @@ class MCPCore(OpenAICompatibleProvider):
         print(response.choices[0].message.content)
     """
     AVAILABLE_MODELS = [
-        "google/gemma-7b-it",
-        "deepseek-ai/deepseek-coder-33b-instruct",
-        "deepseek-ai/DeepSeek-R1",
-        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
-        "deepseek-ai/DeepSeek-v3-0324",
-        "fixie-ai/ultravox-v0_4_1-llama-3_1-8b",
-        "meta-llama/Llama-3.3-70B-Instruct",
-        "meta-llama/Llama-4-Maverick-Instruct",
-        "mistralai/Mistral-7B-Instruct-v0.2",
-        "qwen-max-latest",
-        "qwen-plus-latest",
-        "qwen2.5-coder-32b-instruct",
-        "qwen-turbo-latest",
-        "qwen2.5-14b-instruct-1m",
-        "GLM-4-32B",
-        "Z1-32B",
-        "Z1-Rumination",
-        "arena-model",
-        "qvq-72b-preview-0310",
-        "qwq-32b",
-        "qwen3-235b-a22b",
-        "qwen3-30b-a3b",
-        "qwen3-32b",
-        "deepseek-flash",
-        "@cf/meta/llama-4-scout-17b-16e-instruct",
-        "任务专用",
+        "@cf/deepseek-ai/deepseek-math-7b-instruct",
+        "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
+        "@cf/defog/sqlcoder-7b-2",
+        "@cf/fblgit/una-cybertron-7b-v2-bf16",
+        "@cf/google/gemma-3-12b-it",
+        "@cf/meta/llama-2-7b-chat-int8",
+        "@hf/thebloke/llama-2-13b-chat-awq",
+        "@hf/thebloke/llamaguard-7b-awq",
+        "@hf/thebloke/mistral-7b-instruct-v0.1-awq",
+        "@hf/thebloke/neural-chat-7b-v3-1-awq",
+        "anthropic/claude-3.5-haiku",
+        "anthropic/claude-3.5-sonnet",
+        "anthropic/claude-3.7-sonnet",
+        "anthropic/claude-3.7-sonnet:thinking",
+        "anthropic/claude-opus-4",
+        "anthropic/claude-sonnet-4",
+        "openai/chatgpt-4o-latest",
+        "openai/gpt-3.5-turbo",
+        "openai/gpt-4.1",
+        "openai/gpt-4.1-mini",
+        "openai/gpt-4.1-nano",
+        "openai/gpt-4o-mini-search-preview",
+        "openai/gpt-4o-search-preview",
+        "openai/o1-pro",
+        "openai/o3-mini",
+        "sarvam-m",
+        "x-ai/grok-3-beta",
     ]
 
+    def _auto_fetch_token(self):
+        """Automatically fetch a token from the signup endpoint."""
+        session = Session()
+        def random_string(length=8):
+            return ''.join(random.choices(string.ascii_lowercase, k=length))
+        name = random_string(6)
+        email = f"{random_string(8)}@gmail.com"
+        password = email
+        profile_image_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAAAXNSR0IArs4c6QAAAkRJREFUeF7tmDFOw0AUBdcSiIaKM3CKHIQ7UHEISq5AiUTFHYC0XADoTRsJEZFEjhFIaYAim92fjGFS36+/zOTZzjavl0d98oMh0CgE4+IriEJYPhQC86EQhdAIwPL4DFEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg2BCfkAIqwAA94KZ/EAAAAASUVORK5CYII="
+        payload = {
+            "name": name,
+            "email": email,
+            "password": password,
+            "profile_image_url": profile_image_url
+        }
+        headers = {
+            **LitAgent().generate_fingerprint(),
+            'origin': 'https://chat.mcpcore.xyz',
+            'referer': 'https://chat.mcpcore.xyz/auth',
+        }
+        try:
+            resp = session.post(
+                "https://chat.mcpcore.xyz/api/v1/auths/signup",
+                headers=headers,
+                json=payload,
+                timeout=30,
+                impersonate="chrome110"
+            )
+            if resp.ok:
+                data = resp.json()
+                token = data.get("token")
+                if token:
+                    return token
+                # fallback: try to get from set-cookie
+                set_cookie = resp.headers.get("set-cookie", "")
+                if "token=" in set_cookie:
+                    return set_cookie.split("token=")[1].split(";")[0]
+            raise RuntimeError(f"Failed to auto-fetch token: {resp.status_code} {resp.text}")
+        except Exception as e:
+            raise RuntimeError(f"Token auto-fetch failed: {e}")
+
     def __init__(
         self,
-        cookies_path: str, # Make cookies path mandatory for authentication
         timeout: int = 60,
     ):
         """
         Initializes the MCPCore OpenAI-compatible client.
 
         Args:
-            cookies_path: Path to the JSON file containing cookies (must include 'token').
             timeout: Request timeout in seconds.
-            proxies: Optional proxy configuration.
-            system_prompt: Default system prompt to use if none is provided in messages.
         """
         self.api_endpoint = "https://chat.mcpcore.xyz/api/chat/completions"
         self.timeout = timeout
-        self.cookies_path = cookies_path
-
-        try:
-            self.token = self._load_token_from_cookies()
-            if not self.token:
-                raise ValueError("Could not find 'token' cookie in the provided file.")
-        except Exception as e:
-            raise ValueError(f"Failed to load authentication token from cookies file '{cookies_path}': {e}") from e
-
+        self.token = self._auto_fetch_token()
         self.session = Session() # Use curl_cffi Session
 
         # Basic headers + Authorization
         self.headers = {
-            'authority': 'chat.mcpcore.xyz',
-            'accept': '*/*', # Accept anything, let the server decide
-            'accept-language': 'en-US,en;q=0.9',
-            'authorization': f'Bearer {self.token}',
-            'content-type': 'application/json',
+            **LitAgent().generate_fingerprint(),
             'origin': 'https://chat.mcpcore.xyz',
-            'referer': 'https://chat.mcpcore.xyz/',
-            'user-agent': LitAgent().random(), # Use LitAgent for User-Agent
+            'referer': 'https://chat.mcpcore.xyz/auth',
        }
        # Add more headers mimicking browser behavior if needed, e.g., sec-ch-ua, etc.
        # Example:
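
The `_auto_fetch_token` addition above amounts to one unauthenticated POST against the signup endpoint using a randomized throwaway account. A minimal standalone sketch of the same request follows; the literal field values stand in for the random strings the method generates, and the endpoint, headers, and `impersonate` value are taken directly from the diff:

    from curl_cffi.requests import Session

    session = Session()
    resp = session.post(
        "https://chat.mcpcore.xyz/api/v1/auths/signup",
        headers={
            "origin": "https://chat.mcpcore.xyz",
            "referer": "https://chat.mcpcore.xyz/auth",
        },
        json={
            "name": "abcdef",                  # placeholder: random lowercase string
            "email": "abcdefgh@gmail.com",     # placeholder: random mailbox
            "password": "abcdefgh@gmail.com",  # the provider reuses the email as password
            "profile_image_url": "data:image/png;base64,...",  # placeholder avatar
        },
        timeout=30,
        impersonate="chrome110",  # curl_cffi TLS/browser impersonation
    )
    token = resp.json().get("token")  # the provider also falls back to the set-cookie header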
@@ -366,27 +390,42 @@ class MCPCore(OpenAICompatibleProvider):
         self.session.headers.update(self.headers)
         self.chat = Chat(self) # Initialize chat interface
 
-    def _load_token_from_cookies(self) -> Optional[str]:
-        """Load the 'token' value from a JSON cookies file."""
-        try:
-            with open(self.cookies_path, "r") as f:
-                cookies = json.load(f)
-            # Find the cookie named 'token'
-            token_cookie = next((cookie for cookie in cookies if cookie.get("name") == "token"), None)
-            return token_cookie.get("value") if token_cookie else None
-        except FileNotFoundError:
-            print(f"{RED}Error: Cookies file not found at {self.cookies_path}!{RESET}")
-            return None
-        except json.JSONDecodeError:
-            print(f"{RED}Error: Invalid JSON format in cookies file: {self.cookies_path}!{RESET}")
-            return None
-        except Exception as e:
-            print(f"{RED}An unexpected error occurred loading cookies: {e}{RESET}")
-            return None
-
     @property
     def models(self):
         class _ModelList:
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
         return _ModelList()
+
+if __name__ == "__main__":
+    print("-" * 100)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 100)
+
+    test_prompt = "Say 'Hello' in one word"
+
+    client = MCPCore()
+    for model in client.models.list():
+        print(f"\rTesting {model}...", end="")
+        try:
+            presp = client.chat.completions.create(
+                model=model,
+                messages=[{"role": "user", "content": test_prompt}]
+            )
+            # Try to get the response text (truncate to 100 chars)
+            if hasattr(presp, "choices") and presp.choices and hasattr(presp.choices[0], "message"):
+                content = presp.choices[0].message.content or ""
+                clean_text = content.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                display_text = clean_text[:100] + "..." if len(clean_text) > 100 else clean_text
+                status = "✓" if clean_text else "✗"
+                if not clean_text:
+                    display_text = "Empty or invalid response"
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            error_msg = str(e)
+            if len(error_msg) > 100:
+                error_msg = error_msg[:97] + "..."
+            print(f"\r{model:<50} {'✗':<10} Error: {error_msg}")
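
Net effect for callers: `MCPCore()` now constructs with no arguments, registering a temporary account and holding the bearer token internally, where 8.3.1 required a `cookies_path`. A short usage sketch of the new call pattern; the import path is assumed from the file location `webscout/Provider/OPENAI/mcpcore.py`, and the model name is one entry from the updated `AVAILABLE_MODELS` list:

    from webscout.Provider.OPENAI.mcpcore import MCPCore

    client = MCPCore(timeout=60)  # token is auto-fetched during __init__
    response = client.chat.completions.create(
        model="openai/gpt-4.1-mini",  # any entry from client.models.list()
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response.choices[0].message.content)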
webscout/Provider/OPENAI/monochat.py +329 -0

@@ -0,0 +1,329 @@
+import time
+import uuid
+import requests
+import json
+import re
+from typing import List, Dict, Optional, Union, Generator, Any
+
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage, count_tokens
+)
+
+from webscout.litagent import LitAgent
+
+# --- MonoChat Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'MonoChat'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 2049,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Prepare the payload for MonoChat API
+        payload = {
+            "messages": messages,
+            "model": model
+        }
+        if max_tokens is not None and max_tokens > 0:
+            payload["max_tokens"] = max_tokens
+        if temperature is not None:
+            payload["temperature"] = temperature
+        if top_p is not None:
+            payload["top_p"] = top_p
+        payload.update(kwargs)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
+            )
+            if not response.ok:
+                raise IOError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
+            completion_tokens = 0
+            total_tokens = 0
+
+            for line in response.iter_lines():
+                if line:
+                    decoded_line = line.decode('utf-8').strip()
+                    # MonoChat returns lines like: 0:"Hello" or 0:"!" etc.
+                    match = re.search(r'0:"(.*?)"', decoded_line)
+                    if match:
+                        content = match.group(1)
+                        content = self._client.format_text(content)
+                        completion_tokens += count_tokens(content)
+                        total_tokens = prompt_tokens + completion_tokens
+
+                        delta = ChoiceDelta(
+                            content=content,
+                            role="assistant",
+                            tool_calls=None
+                        )
+                        choice = Choice(
+                            index=0,
+                            delta=delta,
+                            finish_reason=None,
+                            logprobs=None
+                        )
+                        chunk = ChatCompletionChunk(
+                            id=request_id,
+                            choices=[choice],
+                            created=created_time,
+                            model=model,
+                            system_fingerprint=None
+                        )
+                        chunk.usage = {
+                            "prompt_tokens": prompt_tokens,
+                            "completion_tokens": completion_tokens,
+                            "total_tokens": total_tokens,
+                            "estimated_cost": None
+                        }
+                        yield chunk
+
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(
+                content=None,
+                role=None,
+                tool_calls=None
+            )
+            choice = Choice(
+                index=0,
+                delta=delta,
+                finish_reason="stop",
+                logprobs=None
+            )
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+            chunk.usage = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
+            yield chunk
+
+        except Exception as e:
+            print(f"Error during MonoChat stream request: {e}")
+            raise IOError(f"MonoChat request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        try:
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
+            )
+            if not response.ok:
+                raise IOError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            full_text = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    match = re.search(r'0:"(.*?)"', line)
+                    if match:
+                        content = match.group(1)
+                        full_text += content
+
+            full_text = self._client.format_text(full_text)
+
+            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
+            completion_tokens = count_tokens(full_text)
+            total_tokens = prompt_tokens + completion_tokens
+
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_text
+            )
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+            return completion
+
+        except Exception as e:
+            print(f"Error during MonoChat non-stream request: {e}")
+            raise IOError(f"MonoChat request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'MonoChat'):
+        self.completions = Completions(client)
+
+class MonoChat(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for MonoChat API.
+
+    Usage:
+        client = MonoChat()
+        response = client.chat.completions.create(
+            model="gpt-4.1",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+    """
+
+    AVAILABLE_MODELS = [
+        "deepseek-r1",
+        "deepseek-v3",
+        "uncensored-r1-32b",
+        "o3-pro",
+        "o4-mini",
+        "o3",
+        "gpt-4.5-preview",
+        "gpt-4.1",
+        "gpt-4.1-mini",
+        "gpt-4.1-nano",
+        "gpt-4o",
+        "gpt-4o-mini",
+        "gpt-4o-search-preview",
+        "gpt-4o-mini-search-preview",
+        "gpt-4-turbo"
+    ]
+
+    def __init__(
+        self,
+        browser: str = "chrome"
+    ):
+        """
+        Initialize the MonoChat client.
+
+        Args:
+            browser: Browser to emulate in user agent
+        """
+        self.timeout = None
+        self.api_endpoint = "https://www.chatwithmono.xyz/api/chat"
+        self.session = requests.Session()
+
+        agent = LitAgent()
+        self.fingerprint = agent.generate_fingerprint(browser)
+
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": self.fingerprint["accept_language"],
+            "content-type": "application/json",
+            "origin": "https://www.chatwithmono.xyz",
+            "referer": "https://www.chatwithmono.xyz/",
+            "user-agent": self.fingerprint["user_agent"]
+        }
+
+        self.session.headers.update(self.headers)
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return MonoChat.AVAILABLE_MODELS
+        return _ModelList()
+
+    def format_text(self, text: str) -> str:
+        """
+        Format text by replacing escaped newlines with actual newlines.
+
+        Args:
+            text: Text to format
+
+        Returns:
+            Formatted text
+        """
+        try:
+            text = text.replace('\\\\', '\\')
+            text = text.replace('\\n', '\n')
+            text = text.replace('\\r', '\r')
+            text = text.replace('\\t', '\t')
+            text = text.replace('\\"', '"')
+            text = text.replace("\\'", "'")
+            try:
+                json_str = f'"{text}"'
+                decoded = json.loads(json_str)
+                return decoded
+            except json.JSONDecodeError:
+                return text
+        except Exception as e:
+            print(f"Warning: Error formatting text: {e}")
+            return text
+
+    def convert_model_name(self, model: str) -> str:
+        """
+        Convert model names to ones supported by MonoChat.
+
+        Args:
+            model: Model name to convert
+
+        Returns:
+            MonoChat model name
+        """
+        return model
+
+if __name__ == "__main__":
+    client = MonoChat()
+    response = client.chat.completions.create(
+        model="gpt-4.1",
+        messages=[{"role": "user", "content": "tell me about humans"}],
+        max_tokens=1000,
+        stream=True
+    )
+    for chunk in response:
+        if chunk.choices and hasattr(chunk.choices[0], "delta") and getattr(chunk.choices[0].delta, "content", None):
+            print(chunk.choices[0].delta.content, end="", flush=True)
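
The `__main__` block above exercises the streaming path; the non-streaming path returns a fully assembled ChatCompletion with estimated usage. A brief sketch of that variant, with the import path assumed from the file location `webscout/Provider/OPENAI/monochat.py`:

    from webscout.Provider.OPENAI.monochat import MonoChat

    client = MonoChat()
    completion = client.chat.completions.create(
        model="gpt-4o-mini",  # any entry from MonoChat.AVAILABLE_MODELS
        messages=[{"role": "user", "content": "Say hello in one word"}],
        stream=False,  # _create_non_stream aggregates the 0:"..." chunks into one message
    )
    print(completion.choices[0].message.content)
    print(completion.usage)  # prompt/completion totals estimated via count_tokens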