webscout 8.3.6__py3-none-any.whl → 8.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of webscout might be problematic.

Files changed (130)
  1. webscout/AIutel.py +2 -0
  2. webscout/Provider/AISEARCH/__init__.py +18 -11
  3. webscout/Provider/AISEARCH/scira_search.py +3 -1
  4. webscout/Provider/Aitopia.py +2 -3
  5. webscout/Provider/Andi.py +3 -3
  6. webscout/Provider/ChatGPTClone.py +1 -1
  7. webscout/Provider/ChatSandbox.py +1 -0
  8. webscout/Provider/Cloudflare.py +1 -1
  9. webscout/Provider/Cohere.py +1 -0
  10. webscout/Provider/Deepinfra.py +7 -10
  11. webscout/Provider/ExaAI.py +1 -1
  12. webscout/Provider/ExaChat.py +1 -80
  13. webscout/Provider/Flowith.py +1 -1
  14. webscout/Provider/Gemini.py +7 -5
  15. webscout/Provider/GeminiProxy.py +1 -0
  16. webscout/Provider/GithubChat.py +3 -1
  17. webscout/Provider/Groq.py +1 -1
  18. webscout/Provider/HeckAI.py +8 -4
  19. webscout/Provider/Jadve.py +23 -38
  20. webscout/Provider/K2Think.py +308 -0
  21. webscout/Provider/Koboldai.py +8 -186
  22. webscout/Provider/LambdaChat.py +2 -4
  23. webscout/Provider/Nemotron.py +3 -4
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OLLAMA.py +1 -0
  26. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  27. webscout/Provider/OPENAI/FalconH1.py +2 -7
  28. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  29. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  30. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  31. webscout/Provider/OPENAI/PI.py +5 -4
  32. webscout/Provider/OPENAI/Qwen3.py +2 -3
  33. webscout/Provider/OPENAI/TogetherAI.py +2 -2
  34. webscout/Provider/OPENAI/TwoAI.py +3 -4
  35. webscout/Provider/OPENAI/__init__.py +17 -58
  36. webscout/Provider/OPENAI/ai4chat.py +313 -303
  37. webscout/Provider/OPENAI/base.py +9 -29
  38. webscout/Provider/OPENAI/chatgpt.py +7 -2
  39. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  40. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  41. webscout/Provider/OPENAI/deepinfra.py +6 -6
  42. webscout/Provider/OPENAI/heckai.py +4 -1
  43. webscout/Provider/OPENAI/netwrck.py +1 -0
  44. webscout/Provider/OPENAI/scirachat.py +6 -0
  45. webscout/Provider/OPENAI/textpollinations.py +3 -11
  46. webscout/Provider/OPENAI/toolbaz.py +14 -11
  47. webscout/Provider/OpenGPT.py +1 -1
  48. webscout/Provider/Openai.py +150 -402
  49. webscout/Provider/PI.py +1 -0
  50. webscout/Provider/Perplexitylabs.py +1 -2
  51. webscout/Provider/QwenLM.py +107 -89
  52. webscout/Provider/STT/__init__.py +17 -2
  53. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  54. webscout/Provider/StandardInput.py +1 -1
  55. webscout/Provider/TTI/__init__.py +18 -12
  56. webscout/Provider/TTS/__init__.py +18 -10
  57. webscout/Provider/TeachAnything.py +1 -0
  58. webscout/Provider/TextPollinationsAI.py +5 -12
  59. webscout/Provider/TogetherAI.py +86 -87
  60. webscout/Provider/TwoAI.py +53 -309
  61. webscout/Provider/TypliAI.py +2 -1
  62. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  63. webscout/Provider/Venice.py +2 -1
  64. webscout/Provider/VercelAI.py +1 -0
  65. webscout/Provider/WiseCat.py +2 -1
  66. webscout/Provider/WrDoChat.py +2 -1
  67. webscout/Provider/__init__.py +18 -86
  68. webscout/Provider/ai4chat.py +1 -1
  69. webscout/Provider/akashgpt.py +7 -10
  70. webscout/Provider/cerebras.py +115 -9
  71. webscout/Provider/chatglm.py +170 -83
  72. webscout/Provider/cleeai.py +1 -2
  73. webscout/Provider/deepseek_assistant.py +1 -1
  74. webscout/Provider/elmo.py +1 -1
  75. webscout/Provider/geminiapi.py +1 -1
  76. webscout/Provider/granite.py +1 -1
  77. webscout/Provider/hermes.py +1 -3
  78. webscout/Provider/julius.py +1 -0
  79. webscout/Provider/learnfastai.py +1 -1
  80. webscout/Provider/llama3mitril.py +1 -1
  81. webscout/Provider/llmchat.py +1 -1
  82. webscout/Provider/llmchatco.py +1 -1
  83. webscout/Provider/meta.py +3 -3
  84. webscout/Provider/oivscode.py +2 -2
  85. webscout/Provider/scira_chat.py +51 -124
  86. webscout/Provider/searchchat.py +1 -0
  87. webscout/Provider/sonus.py +1 -1
  88. webscout/Provider/toolbaz.py +15 -12
  89. webscout/Provider/turboseek.py +31 -22
  90. webscout/Provider/typefully.py +2 -1
  91. webscout/Provider/x0gpt.py +1 -0
  92. webscout/Provider/yep.py +2 -1
  93. webscout/tempid.py +6 -0
  94. webscout/version.py +1 -1
  95. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
  96. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
  97. webscout/Provider/AllenAI.py +0 -440
  98. webscout/Provider/Blackboxai.py +0 -793
  99. webscout/Provider/FreeGemini.py +0 -250
  100. webscout/Provider/GptOss.py +0 -207
  101. webscout/Provider/Hunyuan.py +0 -283
  102. webscout/Provider/Kimi.py +0 -445
  103. webscout/Provider/MCPCore.py +0 -322
  104. webscout/Provider/MiniMax.py +0 -207
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  106. webscout/Provider/OPENAI/MiniMax.py +0 -298
  107. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  108. webscout/Provider/OPENAI/copilot.py +0 -321
  109. webscout/Provider/OPENAI/gptoss.py +0 -288
  110. webscout/Provider/OPENAI/kimi.py +0 -469
  111. webscout/Provider/OPENAI/mcpcore.py +0 -431
  112. webscout/Provider/OPENAI/multichat.py +0 -378
  113. webscout/Provider/Reka.py +0 -214
  114. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  115. webscout/Provider/asksteve.py +0 -220
  116. webscout/Provider/copilot.py +0 -441
  117. webscout/Provider/freeaichat.py +0 -294
  118. webscout/Provider/koala.py +0 -182
  119. webscout/Provider/lmarena.py +0 -198
  120. webscout/Provider/monochat.py +0 -275
  121. webscout/Provider/multichat.py +0 -375
  122. webscout/Provider/scnet.py +0 -244
  123. webscout/Provider/talkai.py +0 -194
  124. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  125. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  126. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  127. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  128. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  129. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  130. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/cerebras.py CHANGED
@@ -1,6 +1,6 @@
-
 import re
 
+
 # Import trio before curl_cffi to prevent eventlet socket monkey-patching conflicts
 # See: https://github.com/python-trio/trio/issues/3015
 try:
@@ -28,16 +28,69 @@ class Cerebras(Provider):
     """
     A class to interact with the Cerebras API using a cookie for authentication.
     """
-
+    required_auth = True
     AVAILABLE_MODELS = [
         "qwen-3-coder-480b",
         "qwen-3-235b-a22b-instruct-2507",
         "qwen-3-235b-a22b-thinking-2507",
         "qwen-3-32b",
         "llama-3.3-70b",
-        "llama-4-maverick-17b-128e-instruct"
+        "llama-4-maverick-17b-128e-instruct",
+        "gpt-oss-120b",
+        "llama-4-scout-17b-16e-instruct",
+        "llama3.1-8b"
     ]
 
+    @classmethod
+    def get_models(cls, api_key: str = None):
+        """Fetch available models from Cerebras API.
+
+        Args:
+            api_key (str, optional): Cerebras API key. If not provided, returns default models.
+
+        Returns:
+            list: List of available model IDs
+        """
+        if not api_key:
+            return cls.AVAILABLE_MODELS
+
+        try:
+            # Use a temporary curl_cffi session for this class method
+            temp_session = Session()
+            headers = {
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {api_key}",
+            }
+
+            response = temp_session.get(
+                "https://api.cerebras.ai/v1/models",
+                headers=headers,
+                impersonate="chrome120"
+            )
+
+            if response.status_code != 200:
+                return cls.AVAILABLE_MODELS
+
+            data = response.json()
+            if "data" in data and isinstance(data["data"], list):
+                return [model['id'] for model in data['data']]
+            return cls.AVAILABLE_MODELS
+
+        except Exception:
+            # Fallback to default models list if fetching fails
+            return cls.AVAILABLE_MODELS
+
+    @classmethod
+    def update_available_models(cls, api_key=None):
+        """Update the available models list from Cerebras API"""
+        try:
+            models = cls.get_models(api_key)
+            if models and len(models) > 0:
+                cls.AVAILABLE_MODELS = models
+        except Exception:
+            # Fallback to default models list if fetching fails
+            pass
+
     def __init__(
         self,
         cookie_path: str = None,
@@ -56,12 +109,6 @@ class Cerebras(Provider):
         temperature: float = 0.7,
         top_p: float = 0.8,
     ):
-        # Validate model choice
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(
-                f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
-            )
-
         # Initialize basic settings first
         self.timeout = timeout
         self.model = model
@@ -89,6 +136,15 @@ class Cerebras(Provider):
         else:
             raise ValueError("Either api_key must be provided or cookie_path must be specified")
 
+        # Update available models from API
+        self.update_available_models(self.api_key)
+
+        # Validate model choice after updating models
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(
+                f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
+            )
+
         # Initialize optimizers
         self.__available_optimizers = (
             method
@@ -112,6 +168,56 @@ class Cerebras(Provider):
         # Apply proxies to the session
         self.session.proxies = proxies
 
+    @classmethod
+    def get_models(cls, api_key: str = None):
+        """Fetch available models from Cerebras API.
+
+        Args:
+            api_key (str, optional): Cerebras API key. If not provided, returns default models.
+
+        Returns:
+            list: List of available model IDs
+        """
+        if not api_key:
+            return cls.AVAILABLE_MODELS
+
+        try:
+            # Use a temporary curl_cffi session for this class method
+            temp_session = Session()
+            headers = {
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {api_key}",
+            }
+
+            response = temp_session.get(
+                "https://api.cerebras.ai/v1/models",
+                headers=headers,
+                impersonate="chrome120"
+            )
+
+            if response.status_code != 200:
+                return cls.AVAILABLE_MODELS
+
+            data = response.json()
+            if "data" in data and isinstance(data["data"], list):
+                return [model['id'] for model in data['data']]
+            return cls.AVAILABLE_MODELS
+
+        except Exception:
+            # Fallback to default models list if fetching fails
+            return cls.AVAILABLE_MODELS
+
+    @classmethod
+    def update_available_models(cls, api_key=None):
+        """Update the available models list from Cerebras API"""
+        try:
+            models = cls.get_models(api_key)
+            if models and len(models) > 0:
+                cls.AVAILABLE_MODELS = models
+        except Exception:
+            # Fallback to default models list if fetching fails
+            pass
+
     # Rest of the class implementation remains the same...
     @staticmethod
     def extract_query(text: str) -> str:
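
The net effect of the cerebras.py hunks above is that the hardcoded model list becomes a fallback only: get_models() queries the live /v1/models endpoint, and __init__ refreshes AVAILABLE_MODELS before validating the requested model. A minimal usage sketch of the new flow; assumptions are a valid key in a CEREBRAS_API_KEY environment variable, an api_key constructor parameter (implied by the "Either api_key must be provided" error in the hunk), and the module path taken from the file list:

import os
from webscout.Provider.cerebras import Cerebras

api_key = os.environ["CEREBRAS_API_KEY"]  # assumption: a valid Cerebras key

# New in 8.3.7: fetch the live model list; a missing key or any request
# failure silently falls back to the hardcoded AVAILABLE_MODELS defaults.
models = Cerebras.get_models(api_key=api_key)
print(models)

# __init__ now calls update_available_models() before validating `model`,
# so a model newly exposed by the API is accepted without a library update.
ai = Cerebras(api_key=api_key, model="llama3.1-8b")

Note that the same get_models/update_available_models pair is added twice in this release, once at lines 44-93 and again at lines 171-220; since both live in the class body, the second definition shadows the first at class-creation time.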
webscout/Provider/chatglm.py CHANGED
@@ -1,9 +1,7 @@
 from curl_cffi import CurlError
 from curl_cffi.requests import Session
-import json
 from typing import Any, Dict, Optional, Generator, List, Union
 import uuid
-
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
@@ -11,11 +9,38 @@ from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
 
+
 class ChatGLM(Provider):
     """
-    A class to interact with the ChatGLM API.
+    A class to interact with the Z.AI Chat API (GLM-4.5).
     """
+    required_auth = False
+    url = "https://chat.z.ai"
+    # Model nickname mapping system
+    MODEL_MAPPING = {
+        "glm-4.5V": "glm-4.5v",
+        "glm-4-32B": "main_chat",
+        "glm-4.5-Air": "0727-106B-API",
+        "glm-4.5": "0727-360B-API",
+        # Add more nicknames as needed
+    }
+    # Reverse mapping: API format to nickname
+    GLM_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}
+    AVAILABLE_MODELS = list(MODEL_MAPPING.keys()) + list(GLM_TO_MODEL.keys()) + ["0727-106B-API", "0727-360B-API", "glm-4.5v", "main_chat"]
 
+    @classmethod
+    def _resolve_model(cls, model: str) -> str:
+        """
+        Resolve a model nickname or API name to the API format.
+        """
+        if model in cls.GLM_TO_MODEL:
+            return model
+        if model in cls.MODEL_MAPPING:
+            return cls.MODEL_MAPPING[model]
+        # fallback to direct API name if present
+        if model in ["0727-106B-API", "0727-360B-API", "glm-4.5v", "main_chat"]:
+            return model
+        raise ValueError(f"Invalid model: {model}. Choose from: {cls.AVAILABLE_MODELS}")
     def __init__(
         self,
         is_conversation: bool = True,
@@ -27,36 +52,32 @@ class ChatGLM(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        plus_model: bool = True,
+        model: str = "0727-106B-API",
     ):
-        """Initializes the ChatGLM API client."""
-        self.session = Session() # Use curl_cffi Session
+        """Initializes the Z.AI Chat API client."""
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://chatglm.cn/chatglm/mainchat-api/guest/stream"
-        self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
-        self.plus_model = plus_model
         self.headers = {
             'Accept-Language': 'en-US,en;q=0.9',
             'App-Name': 'chatglm',
-            'Authorization': 'undefined',
             'Content-Type': 'application/json',
-            'Origin': 'https://chatglm.cn',
+            'Origin': self.url,
             'User-Agent': LitAgent().random(),
             'X-App-Platform': 'pc',
             'X-App-Version': '0.0.1',
-            'X-Device-Id': '', #Will be generated each time
             'Accept': 'text/event-stream',
         }
+        self.api_endpoint = f"{self.url}/api/chat/completions"
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
         self.session.headers.update(self.headers)
-        Conversation.intro = ( # type: ignore
+        Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
             )
@@ -67,16 +88,26 @@ class ChatGLM(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.session.proxies = proxies # Assign proxies directly
+        self.session.proxies = proxies
+        # Use nickname resolution for model
+        self.model = self._resolve_model(model)
+
+    def _get_api_key(self):
+        if not hasattr(self, 'api_key') or not self.api_key:
+            response = self.session.get(f"{self.url}/api/v1/auths/")
+            self.api_key = response.json().get("token")
+        return self.api_key
+
+    def _get_cookie(self):
+        """Get authentication cookie from the site"""
+        if not hasattr(self, 'cookie') or not self.cookie:
+            response = self.session.get(f"{self.url}/")
+            self.cookie = response.headers.get('Set-Cookie', '')
+        return self.cookie
+
+
+    # _zai_extractor removed; use extract_regexes in sanitize_stream
 
-    @staticmethod
-    def _chatglm_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-        """Extracts content from ChatGLM stream JSON objects."""
-        if isinstance(chunk, dict):
-            parts = chunk.get('parts', [])
-            if parts and isinstance(parts[0].get('content'), list) and parts[0]['content']:
-                return parts[0]['content'][0].get('text')
-        return None
 
     def ask(
         self,
@@ -86,13 +117,14 @@ class ChatGLM(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
-        """Chat with AI
+        """Chat with Z.AI API
         Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            model (str, optional): Model name. Defaults to None.
         Returns:
            Union[Dict, Generator[Dict, None, None]]: Response generated
         """
@@ -106,110 +138,165 @@
             raise exceptions.FailedToGenerateResponseError(
                 f"Optimizer is not one of {self.__available_optimizers}"
             )
-        device_id = str(uuid.uuid4()).replace('-', '')
-        self.session.headers.update({'X-Device-Id': device_id})
+        api_key = self._get_api_key()
         payload = {
-            "assistant_id": "65940acff94777010aa6b796",
-            "conversation_id": "",
-            "meta_data": {
-                "if_plus_model": self.plus_model,
-                "is_test": False,
-                "input_question_type": "xxxx",
-                "channel": "",
-                "draft_id": "",
-                "quote_log_id": "",
-                "platform": "pc",
-            },
+            "stream": True,
+            "model": self.model, # Already resolved to API format
             "messages": [
-                {
-                    "role": "user",
-                    "content": [{"type": "text", "text": conversation_prompt}],
-                }
+                {"role": "user", "content": conversation_prompt}
             ],
+            "params": {},
+            "features": {"image_generation": False, "web_search": False, "auto_web_search": False, "preview_mode": True, "flags": [], "features": [{"type": "mcp", "server": "vibe-coding", "status": "hidden"}, {"type": "mcp", "server": "ppt-maker", "status": "hidden"}, {"type": "mcp", "server": "image-search", "status": "hidden"}], "enable_thinking": True},
+            "actions": [],
+            "tags": [],
+            "chat_id": "local",
+            "id": str(uuid.uuid4())
         }
 
         def for_stream():
-            streaming_text = "" # Initialize outside try block
-            last_processed_content = "" # Track the last processed content
+            streaming_text = ""
             try:
+                cookie = self._get_cookie()
                 response = self.session.post(
-                    self.api_endpoint, json=payload, stream=True, timeout=self.timeout,
-                    impersonate="chrome120" # Add impersonate
+                    self.api_endpoint,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome120",
+                    headers={
+                        "Authorization": f"Bearer {api_key}",
+                        "x-fe-version": "prod-fe-1.0.70",
+                    }
                 )
                 response.raise_for_status()
 
-                # Use sanitize_stream
+                def glm_content_extractor(chunk):
+                    if not isinstance(chunk, dict) or chunk.get("type") != "chat:completion":
+                        return None
+                    data = chunk.get("data", {})
+                    phase = data.get("phase")
+                    usage = data.get("usage")
+                    if usage:
+                        return None
+                    delta_content = data.get("delta_content")
+                    if delta_content:
+                        if phase == "thinking":
+                            # Remove details/summary tags if present
+                            split_text = delta_content.split("</summary>\n>")[-1]
+                            return {"reasoning_content": split_text}
+                        elif phase == "answer":
+                            return {"content": delta_content}
+                        else:
+                            return {"content": delta_content}
+                    return None
+
                 processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    data=response.iter_content(chunk_size=None),
                     intro_value="data:",
-                    to_json=True, # Stream sends JSON
-                    content_extractor=self._chatglm_extractor, # Use the specific extractor
-                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                    to_json=True,
+                    content_extractor=glm_content_extractor,
+                    yield_raw_on_error=False,
+                    raw=False
                 )
-
-                for current_full_text in processed_stream:
-                    # current_full_text is the full text extracted by _chatglm_extractor
-                    if current_full_text and isinstance(current_full_text, str):
-                        new_text = current_full_text[len(last_processed_content):]
-                        if new_text: # Check for new content
-                            streaming_text += new_text
-                            last_processed_content = current_full_text # Update tracker
-                            yield new_text if raw else dict(text=new_text)
-
+                last_content = ""
+                last_reasoning = ""
+                in_think = False
+                for chunk in processed_stream:
+                    if not chunk:
+                        continue
+                    content = chunk.get('content') if isinstance(chunk, dict) else None
+                    reasoning = chunk.get('reasoning_content') if isinstance(chunk, dict) else None
+                    # Handle reasoning_content with <think> tags
+                    if reasoning and reasoning != last_reasoning:
+                        if not in_think:
+                            yield "<think>\n\n"
+                            in_think = True
+                        yield reasoning
+                        last_reasoning = reasoning
+                    # If we were in <think> and now have new content, close <think>
+                    if in_think and content and content != last_content:
+                        yield "\n</think>\n\n"
+                        in_think = False
+                    # Handle normal content
+                    if content and content != last_content:
+                        yield content
+                        streaming_text += content
+                        last_content = content
+                        if not raw:
+                            self.last_response = {"text": content}
+                self.conversation.update_chat_history(prompt, streaming_text)
             except CurlError as e:
-                raise exceptions.ProviderConnectionError(f"Request failed (CurlError): {e}") from e
+                raise exceptions.APIConnectionError(f"Request failed (CurlError): {e}") from e
             except Exception as e:
                 raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}") from e
-            finally:
-                # Update history after stream finishes or fails
-                if streaming_text:
-                    self.last_response.update(dict(text=streaming_text))
-                    self.conversation.update_chat_history(
-                        prompt, self.get_message(self.last_response)
-                    )
 
         def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
+            full_text = ""
+            try:
+                for chunk_data in for_stream():
+                    if raw:
+                        if isinstance(chunk_data, str):
+                            full_text += chunk_data
+                    else:
+                        if isinstance(chunk_data, dict) and "text" in chunk_data:
+                            full_text += chunk_data["text"]
+            except Exception as e:
+                if not full_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+            self.last_response = {"text": full_text}
+            return full_text if raw else self.last_response
         return for_stream() if stream else for_non_stream()
 
+
     def chat(
         self,
         prompt: str,
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
+        raw: bool = False,
     ) -> Union[str, Generator[str, None, None]]:
         """Generate response `str`"""
 
         def for_stream():
             for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
             ):
-                yield self.get_message(response)
+                if raw:
+                    yield response
+                else:
+                    # Only call get_message on dicts, yield str directly
+                    if isinstance(response, dict):
+                        yield self.get_message(response)
+                    elif isinstance(response, str):
+                        yield response
 
         def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            if raw:
+                return result if isinstance(result, str) else self.get_message(result)
+            else:
+                return self.get_message(result)
 
         return for_stream() if stream else for_non_stream()
 
+
     def get_message(self, response: dict) -> str:
         """Retrieves message only from response"""
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        return response.get("text", "")
+
 
 
 if __name__ == "__main__":
     from rich import print
-    ai = ChatGLM()
-    response = ai.chat(input(">>> "), stream=True)
+    ai = ChatGLM(model="glm-4-32B")
+    response = ai.chat("hi", stream=True, raw=False)
     for chunk in response:
         print(chunk, end="", flush=True)
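
Taken together, the chatglm.py hunks repoint the provider from chatglm.cn's guest endpoint to chat.z.ai: a guest token is fetched from /api/v1/auths/, nicknames such as "glm-4.5-Air" are resolved through MODEL_MAPPING to internal API names, and thinking-phase deltas stream back wrapped in <think> tags ahead of the answer. A minimal sketch of the new call pattern, adapted from the __main__ block above (module path inferred from the file list):

from webscout.Provider.chatglm import ChatGLM

# "glm-4.5-Air" is a nickname; _resolve_model() maps it to "0727-106B-API".
ai = ChatGLM(model="glm-4.5-Air")
for chunk in ai.chat("Explain HTTP caching in one paragraph.", stream=True):
    # Reasoning tokens arrive between "<think>" and "</think>" markers,
    # followed by the answer-phase content.
    print(chunk, end="", flush=True)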
webscout/Provider/cleeai.py CHANGED
@@ -1,5 +1,4 @@
 import requests
-import json
 from uuid import uuid4
 
 from webscout.AIutel import Optimizers
@@ -12,7 +11,7 @@ class Cleeai(Provider):
     """
     A class to interact with the Cleeai.com API.
     """
-
+    required_auth = False
     def __init__(
         self,
         is_conversation: bool = True,
webscout/Provider/deepseek_assistant.py CHANGED
@@ -31,7 +31,7 @@ class DeepSeekAssistant(Provider):
     """
 
     AVAILABLE_MODELS = ["V3 model", "R1 model"]
-
+    required_auth = False
     @staticmethod
     def _deepseek_assistant_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
         """Extracts content from DeepSeek Assistant stream JSON objects."""
webscout/Provider/elmo.py CHANGED
@@ -15,7 +15,7 @@ class Elmo(Provider):
     """
     A class to interact with the Elmo.chat API.
     """
-
+    required_auth = False
     def __init__(
         self,
         is_conversation: bool = True,
webscout/Provider/geminiapi.py CHANGED
@@ -19,7 +19,7 @@ class GEMINIAPI(Provider):
     """
     A class to interact with the Gemini API using the google-generativeai library.
     """
-
+    required_auth = True
     def __init__(
         self,
         api_key,
webscout/Provider/granite.py CHANGED
@@ -13,7 +13,7 @@ class IBMGranite(Provider):
     A class to interact with the IBM Granite API (accessed via d18n68ssusgr7r.cloudfront.net)
     using Lit agent for the user agent.
     """
-
+    required_auth = False
     AVAILABLE_MODELS = ["granite-3-8b-instruct", "granite-3-2-8b-instruct", "granite-3-3-8b-instruct"]
 
     def __init__(
webscout/Provider/hermes.py CHANGED
@@ -2,20 +2,18 @@ from curl_cffi.requests import Session
 from curl_cffi import CurlError
 import json
 from typing import Union, Any, Dict, Generator, Optional
-
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-from webscout.litagent import LitAgent
 class NousHermes(Provider):
     """
     A class to interact with the Hermes API.
     """
 
     AVAILABLE_MODELS = ["Hermes-3-Llama-3.1-70B", "Hermes-3-Llama-3.1-8B"]
-
+    required_auth = False
     def __init__(
         self,
         cookies_path: str,
webscout/Provider/julius.py CHANGED
@@ -12,6 +12,7 @@ from typing import Union, Any, AsyncGenerator, Dict
 
 
 class Julius(Provider):
+    required_auth = True
     AVAILABLE_MODELS = [
         "Llama 3",
         "GPT-4o",
webscout/Provider/learnfastai.py CHANGED
@@ -16,7 +16,7 @@ class LearnFast(Provider):
     """
     A class to interact with the LearnFast.ai API.
     """
-
+    required_auth = False
     def __init__(
         self,
         is_conversation: bool = True,
webscout/Provider/llama3mitril.py CHANGED
@@ -13,7 +13,7 @@ class Llama3Mitril(Provider):
     """
     A class to interact with the Llama3 Mitril API. Implements the WebScout provider interface.
     """
-
+    required_auth = False
     def __init__(
         self,
         is_conversation: bool = True,
webscout/Provider/llmchat.py CHANGED
@@ -14,7 +14,7 @@ class LLMChat(Provider):
     """
     A class to interact with the LLMChat API
     """
-
+    required_auth = False
     AVAILABLE_MODELS = [
         "@cf/meta/llama-3.1-70b-instruct",
         "@cf/meta/llama-3.1-8b-instruct",
webscout/Provider/llmchatco.py CHANGED
@@ -16,7 +16,7 @@ class LLMChatCo(Provider):
     """
     A class to interact with the LLMChat.co API
     """
-
+    required_auth = False
     AVAILABLE_MODELS = [
         "gemini-flash-2.0", # Default model
         "llama-4-scout",