webscout 8.3.5__py3-none-any.whl → 8.3.7__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. The information is provided for informational purposes only.

Potentially problematic release: this version of webscout has been flagged as potentially problematic; see the registry's advisory for details.
Files changed (159)
  1. webscout/AIutel.py +2 -0
  2. webscout/Bard.py +12 -6
  3. webscout/DWEBS.py +66 -57
  4. webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
  5. webscout/Provider/AISEARCH/__init__.py +18 -11
  6. webscout/Provider/AISEARCH/scira_search.py +3 -1
  7. webscout/Provider/Aitopia.py +2 -3
  8. webscout/Provider/Andi.py +3 -3
  9. webscout/Provider/ChatGPTClone.py +1 -1
  10. webscout/Provider/ChatSandbox.py +1 -0
  11. webscout/Provider/Cloudflare.py +1 -1
  12. webscout/Provider/Cohere.py +1 -0
  13. webscout/Provider/Deepinfra.py +13 -10
  14. webscout/Provider/ExaAI.py +1 -1
  15. webscout/Provider/ExaChat.py +1 -80
  16. webscout/Provider/Flowith.py +6 -1
  17. webscout/Provider/Gemini.py +7 -5
  18. webscout/Provider/GeminiProxy.py +1 -0
  19. webscout/Provider/GithubChat.py +4 -1
  20. webscout/Provider/Groq.py +1 -1
  21. webscout/Provider/HeckAI.py +8 -4
  22. webscout/Provider/Jadve.py +23 -38
  23. webscout/Provider/K2Think.py +308 -0
  24. webscout/Provider/Koboldai.py +8 -186
  25. webscout/Provider/LambdaChat.py +2 -4
  26. webscout/Provider/Nemotron.py +3 -4
  27. webscout/Provider/Netwrck.py +6 -8
  28. webscout/Provider/OLLAMA.py +1 -0
  29. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  30. webscout/Provider/OPENAI/FalconH1.py +2 -7
  31. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  32. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  33. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  34. webscout/Provider/OPENAI/PI.py +5 -4
  35. webscout/Provider/OPENAI/Qwen3.py +2 -3
  36. webscout/Provider/OPENAI/README.md +2 -1
  37. webscout/Provider/OPENAI/TogetherAI.py +52 -57
  38. webscout/Provider/OPENAI/TwoAI.py +3 -4
  39. webscout/Provider/OPENAI/__init__.py +17 -56
  40. webscout/Provider/OPENAI/ai4chat.py +313 -303
  41. webscout/Provider/OPENAI/base.py +9 -29
  42. webscout/Provider/OPENAI/chatgpt.py +7 -2
  43. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  44. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  45. webscout/Provider/OPENAI/deepinfra.py +12 -6
  46. webscout/Provider/OPENAI/e2b.py +60 -8
  47. webscout/Provider/OPENAI/flowith.py +4 -3
  48. webscout/Provider/OPENAI/generate_api_key.py +48 -0
  49. webscout/Provider/OPENAI/heckai.py +4 -1
  50. webscout/Provider/OPENAI/netwrck.py +9 -12
  51. webscout/Provider/OPENAI/refact.py +274 -0
  52. webscout/Provider/OPENAI/scirachat.py +6 -0
  53. webscout/Provider/OPENAI/textpollinations.py +3 -14
  54. webscout/Provider/OPENAI/toolbaz.py +14 -10
  55. webscout/Provider/OpenGPT.py +1 -1
  56. webscout/Provider/Openai.py +150 -402
  57. webscout/Provider/PI.py +1 -0
  58. webscout/Provider/Perplexitylabs.py +1 -2
  59. webscout/Provider/QwenLM.py +107 -89
  60. webscout/Provider/STT/__init__.py +17 -2
  61. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  62. webscout/Provider/StandardInput.py +1 -1
  63. webscout/Provider/TTI/__init__.py +18 -12
  64. webscout/Provider/TTI/bing.py +14 -2
  65. webscout/Provider/TTI/together.py +10 -9
  66. webscout/Provider/TTS/README.md +0 -1
  67. webscout/Provider/TTS/__init__.py +18 -11
  68. webscout/Provider/TTS/base.py +479 -159
  69. webscout/Provider/TTS/deepgram.py +409 -156
  70. webscout/Provider/TTS/elevenlabs.py +425 -111
  71. webscout/Provider/TTS/freetts.py +317 -140
  72. webscout/Provider/TTS/gesserit.py +192 -128
  73. webscout/Provider/TTS/murfai.py +248 -113
  74. webscout/Provider/TTS/openai_fm.py +347 -129
  75. webscout/Provider/TTS/speechma.py +620 -586
  76. webscout/Provider/TeachAnything.py +1 -0
  77. webscout/Provider/TextPollinationsAI.py +5 -15
  78. webscout/Provider/TogetherAI.py +136 -142
  79. webscout/Provider/TwoAI.py +53 -309
  80. webscout/Provider/TypliAI.py +2 -1
  81. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  82. webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
  83. webscout/Provider/Venice.py +2 -1
  84. webscout/Provider/VercelAI.py +1 -0
  85. webscout/Provider/WiseCat.py +2 -1
  86. webscout/Provider/WrDoChat.py +2 -1
  87. webscout/Provider/__init__.py +18 -174
  88. webscout/Provider/ai4chat.py +1 -1
  89. webscout/Provider/akashgpt.py +7 -10
  90. webscout/Provider/cerebras.py +194 -38
  91. webscout/Provider/chatglm.py +170 -83
  92. webscout/Provider/cleeai.py +1 -2
  93. webscout/Provider/deepseek_assistant.py +1 -1
  94. webscout/Provider/elmo.py +1 -1
  95. webscout/Provider/geminiapi.py +1 -1
  96. webscout/Provider/granite.py +1 -1
  97. webscout/Provider/hermes.py +1 -3
  98. webscout/Provider/julius.py +1 -0
  99. webscout/Provider/learnfastai.py +1 -1
  100. webscout/Provider/llama3mitril.py +1 -1
  101. webscout/Provider/llmchat.py +1 -1
  102. webscout/Provider/llmchatco.py +1 -1
  103. webscout/Provider/meta.py +3 -3
  104. webscout/Provider/oivscode.py +2 -2
  105. webscout/Provider/scira_chat.py +51 -124
  106. webscout/Provider/searchchat.py +1 -0
  107. webscout/Provider/sonus.py +1 -1
  108. webscout/Provider/toolbaz.py +15 -11
  109. webscout/Provider/turboseek.py +31 -22
  110. webscout/Provider/typefully.py +2 -1
  111. webscout/Provider/x0gpt.py +1 -0
  112. webscout/Provider/yep.py +2 -1
  113. webscout/conversation.py +22 -20
  114. webscout/sanitize.py +14 -10
  115. webscout/scout/README.md +20 -23
  116. webscout/scout/core/crawler.py +125 -38
  117. webscout/scout/core/scout.py +26 -5
  118. webscout/tempid.py +6 -0
  119. webscout/version.py +1 -1
  120. webscout/webscout_search.py +13 -6
  121. webscout/webscout_search_async.py +10 -8
  122. webscout/yep_search.py +13 -5
  123. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/METADATA +3 -1
  124. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/RECORD +132 -155
  125. webscout/Provider/AllenAI.py +0 -440
  126. webscout/Provider/Blackboxai.py +0 -793
  127. webscout/Provider/FreeGemini.py +0 -250
  128. webscout/Provider/Glider.py +0 -225
  129. webscout/Provider/Hunyuan.py +0 -283
  130. webscout/Provider/MCPCore.py +0 -322
  131. webscout/Provider/MiniMax.py +0 -207
  132. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  133. webscout/Provider/OPENAI/MiniMax.py +0 -298
  134. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  135. webscout/Provider/OPENAI/c4ai.py +0 -394
  136. webscout/Provider/OPENAI/copilot.py +0 -305
  137. webscout/Provider/OPENAI/glider.py +0 -330
  138. webscout/Provider/OPENAI/mcpcore.py +0 -431
  139. webscout/Provider/OPENAI/multichat.py +0 -378
  140. webscout/Provider/Reka.py +0 -214
  141. webscout/Provider/TTS/sthir.py +0 -94
  142. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  143. webscout/Provider/asksteve.py +0 -220
  144. webscout/Provider/copilot.py +0 -422
  145. webscout/Provider/freeaichat.py +0 -294
  146. webscout/Provider/koala.py +0 -182
  147. webscout/Provider/lmarena.py +0 -198
  148. webscout/Provider/monochat.py +0 -275
  149. webscout/Provider/multichat.py +0 -375
  150. webscout/Provider/scnet.py +0 -244
  151. webscout/Provider/talkai.py +0 -194
  152. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  153. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  154. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  155. /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
  156. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  157. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  158. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  159. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/c4ai.py (deleted)
@@ -1,394 +0,0 @@
- import time
- import uuid
- import requests
- import json
- import re
- from typing import List, Dict, Optional, Union, Generator, Any
-
- # Import base classes and utility structures
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from .utils import (
-     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-     ChatCompletionMessage, CompletionUsage,
-     get_system_prompt, get_last_user_message, format_prompt, count_tokens
- )
-
- # Attempt to import LitAgent, fallback if not available
- try:
-     from webscout.litagent import LitAgent
- except ImportError:
-     # Define a dummy LitAgent if webscout is not installed or accessible
-     class LitAgent:
-         def generate_fingerprint(self, browser: str = "chrome") -> Dict[str, Any]:
-             # Return minimal default headers if LitAgent is unavailable
-             print("Warning: LitAgent not found. Using default minimal headers.")
-             return {
-                 "accept": "*/*",
-                 "accept_language": "en-US,en;q=0.9",
-                 "platform": "Windows",
-                 "sec_ch_ua": '"Not/A)Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
-                 "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
-                 "browser_type": browser,
-             }
-         def random(self) -> str:
-             # Return a default user agent if LitAgent is unavailable
-             print("Warning: LitAgent not found. Using default user agent.")
-             return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
-
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'C4AI'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = 2000,
-         stream: bool = False,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         timeout: Optional[int] = None,
-         proxies: Optional[dict] = None,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation.
-         Mimics openai.chat.completions.create
-         """
-         # Extract system prompt using utility function
-         system_prompt = get_system_prompt(messages)
-         if not system_prompt:
-             system_prompt = "You are a helpful assistant."
-
-         # Format the conversation history using format_prompt.
-         # Note: the C4AI API might expect only the *last* user message here;
-         # sending the full history might cause issues.
-         # The system prompt is excluded from format_prompt as it's sent separately,
-         # and do_continue=True because C4AI adds its own assistant prompt implicitly.
-         conversation_prompt = format_prompt(messages, include_system=False, do_continue=True)
-
-         if not conversation_prompt:
-             # Fallback to last user message if formatted prompt is empty
-             last_user_message = get_last_user_message(messages)
-             if not last_user_message:
-                 raise ValueError("No user message found or formatted prompt is empty.")
-             conversation_prompt = last_user_message
-
-         # Create or get conversation ID
-         if model not in self._client._conversation_data:
-             conversation_id = self._client.create_conversation(model, system_prompt)
-             if not conversation_id:
-                 raise IOError(f"Failed to create conversation with model {model}")
-         else:
-             conversation_id = self._client._conversation_data[model]["conversationId"]
-             self._client._conversation_data[model]["messageId"] = self._client.fetch_message_id(conversation_id)
-
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-
-         # Pass the formatted conversation prompt
-         if stream:
-             return self._create_stream(request_id, created_time, model, conversation_id, conversation_prompt, system_prompt, timeout=timeout, proxies=proxies)
-         else:
-             return self._create_non_stream(request_id, created_time, model, conversation_id, conversation_prompt, system_prompt, timeout=timeout, proxies=proxies)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str, conversation_id: str, prompt: str, system_prompt: str,
-         timeout: Optional[int] = None, proxies: Optional[dict] = None
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         original_proxies = self._client.session.proxies
-         if proxies is not None:
-             self._client.session.proxies = proxies
-         else:
-             self._client.session.proxies = {}
-         try:
-             timeout_val = timeout if timeout is not None else self._client.timeout
-             message_id = self._client._conversation_data[model]["messageId"]
-             url = f"{self._client.url}/api/chat/message"
-             payload = {
-                 "conversationId": conversation_id,
-                 "messageId": message_id,
-                 "model": model,
-                 "prompt": prompt,  # Use the formatted conversation history as prompt
-                 "preprompt": system_prompt,
-                 "temperature": 0.7,
-                 "top_p": 1,
-                 "top_k": 50,
-                 "max_tokens": self._client.max_tokens_to_sample,
-                 "stop": [],
-                 "stream": True
-             }
-
-             response = self._client.session.post(
-                 url,
-                 headers=self._client.headers,
-                 json=payload,
-                 stream=True,
-                 timeout=timeout_val,
-                 proxies=proxies or getattr(self._client, "proxies", None)
-             )
-             response.raise_for_status()
-
-             full_text = ""
-             for line in response.iter_lines():
-                 if line:
-                     line = line.decode('utf-8')
-                     if line.startswith('data: '):
-                         data = line[6:]
-                         if data == "[DONE]":
-                             break
-
-                         try:
-                             json_data = json.loads(data)
-                             delta_text = json_data.get('text', '')
-                             new_content = delta_text[len(full_text):]
-                             full_text = delta_text
-                             delta = ChoiceDelta(content=new_content)
-                             choice = Choice(index=0, delta=delta, finish_reason=None)
-                             chunk = ChatCompletionChunk(
-                                 id=request_id,
-                                 choices=[choice],
-                                 created=created_time,
-                                 model=model
-                             )
-                             yield chunk
-                         except json.JSONDecodeError:
-                             continue
-
-             delta = ChoiceDelta(content=None)
-             choice = Choice(index=0, delta=delta, finish_reason="stop")
-             chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model
-             )
-             yield chunk
-
-         except Exception as e:
-             print(f"Error during C4AI stream request: {e}")
-             raise IOError(f"C4AI request failed: {e}") from e
-         finally:
-             self._client.session.proxies = original_proxies
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str, conversation_id: str, prompt: str, system_prompt: str,
-         timeout: Optional[int] = None, proxies: Optional[dict] = None
-     ) -> ChatCompletion:
-         original_proxies = self._client.session.proxies
-         if proxies is not None:
-             self._client.session.proxies = proxies
-         else:
-             self._client.session.proxies = {}
-         try:
-             timeout_val = timeout if timeout is not None else self._client.timeout
-             message_id = self._client._conversation_data[model]["messageId"]
-             url = f"{self._client.url}/api/chat/message"
-             payload = {
-                 "conversationId": conversation_id,
-                 "messageId": message_id,
-                 "model": model,
-                 "prompt": prompt,  # Use the formatted conversation history as prompt
-                 "preprompt": system_prompt,
-                 "temperature": 0.7,
-                 "top_p": 1,
-                 "top_k": 50,
-                 "max_tokens": self._client.max_tokens_to_sample,
-                 "stop": [],
-                 "stream": False
-             }
-
-             response = self._client.session.post(
-                 url,
-                 headers=self._client.headers,
-                 json=payload,
-                 timeout=timeout_val,
-                 proxies=proxies or getattr(self._client, "proxies", None)
-             )
-             response.raise_for_status()
-
-             data = response.json()
-             response_text = data.get('text', '')
-             message = ChatCompletionMessage(role="assistant", content=response_text)
-             choice = Choice(index=0, message=message, finish_reason="stop")
-             # Estimate tokens based on the formatted prompt
-             prompt_tokens = count_tokens(prompt)
-             completion_tokens = count_tokens(response_text)
-             usage = CompletionUsage(
-                 prompt_tokens=prompt_tokens,
-                 completion_tokens=completion_tokens,
-                 total_tokens=prompt_tokens + completion_tokens
-             )
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 usage=usage
-             )
-             return completion
-
-         except Exception as e:
-             print(f"Error during C4AI non-stream request: {e}")
-             raise IOError(f"C4AI request failed: {e}") from e
-         finally:
-             self._client.session.proxies = original_proxies
-
- class Chat(BaseChat):
-     def __init__(self, client: 'C4AI'):
-         self.completions = Completions(client)
-
- class C4AI(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for C4AI API.
-
-     Usage:
-         client = C4AI()
-         response = client.chat.completions.create(
-             model="command-a-03-2025",
-             messages=[{"role": "user", "content": "Hello!"}]
-         )
-     """
-
-     AVAILABLE_MODELS = [
-         'command-a-03-2025',
-         'command-r-plus-08-2024',
-         'command-r-08-2024',
-         'command-r-plus',
-         'command-r',
-         'command-r7b-12-2024',
-         'command-r7b-arabic-02-2025'
-     ]
-
-     def __init__(
-         self,
-         browser: str = "chrome"
-     ):
-         """
-         Initialize the C4AI client.
-
-         Args:
-             browser: Browser name for LitAgent to generate User-Agent.
-         """
-         self.timeout = 30
-         self.url = "https://cohereforai-c4ai-command.hf.space"
-         self.session = requests.Session()
-         self.session.proxies = {}
-         self.max_tokens_to_sample = 2000
-
-         agent = LitAgent()
-         fingerprint = agent.generate_fingerprint(browser)
-
-         self.headers = {
-             "Content-Type": "application/json",
-             "User-Agent": fingerprint["user_agent"],
-             "Accept": "*/*",
-             "Accept-Encoding": "gzip, deflate, br, zstd",
-             "Accept-Language": fingerprint["accept_language"],
-             "Origin": "https://cohereforai-c4ai-command.hf.space",
-             "Referer": "https://cohereforai-c4ai-command.hf.space/",
-             "Sec-Ch-Ua": fingerprint["sec_ch_ua"] or "\"Chromium\";v=\"120\"",
-             "Sec-Ch-Ua-Mobile": "?0",
-             "Sec-Ch-Ua-Platform": f"\"{fingerprint['platform']}\"",
-             "Sec-Fetch-Dest": "empty",
-             "Sec-Fetch-Mode": "cors",
-             "Sec-Fetch-Site": "same-origin",
-             "DNT": "1",
-             "Priority": "u=1, i"
-         }
-
-         self._conversation_data = {}
-         self.chat = Chat(self)
-         self.update_available_models()
-
-     def update_available_models(self):
-         """Update the list of available models from the server."""
-         try:
-             response = requests.get("https://cohereforai-c4ai-command.hf.space/")
-             text = response.text
-             models_match = re.search(r'models:(\[.+?\]),oldModels:', text)
-
-             if not models_match:
-                 return
-
-             models_text = models_match.group(1)
-             models_text = re.sub(r',parameters:{[^}]+?}', '', models_text)
-             models_text = models_text.replace('void 0', 'null')
-
-             def add_quotation_mark(match):
-                 return f'{match.group(1)}"{match.group(2)}":'
-
-             models_text = re.sub(r'([{,])([A-Za-z0-9_]+?):', add_quotation_mark, models_text)
-
-             models_data = json.loads(models_text)
-             self.AVAILABLE_MODELS = [model["id"] for model in models_data]
-         except Exception:
-             pass
-
-     def create_conversation(self, model: str, system_prompt: str):
-         """Create a new conversation with the specified model."""
-         url = f"{self.url}/api/conversation"
-         payload = {
-             "model": model,
-             "preprompt": system_prompt,
-         }
-
-         try:
-             response = self.session.post(
-                 url,
-                 headers=self.headers,
-                 json=payload,
-                 timeout=self.timeout
-             )
-             response.raise_for_status()
-
-             data = response.json()
-             conversation_id = data.get("conversationId")
-
-             if conversation_id:
-                 self._conversation_data[model] = {
-                     "conversationId": conversation_id,
-                     "messageId": self.fetch_message_id(conversation_id)
-                 }
-                 return conversation_id
-
-             return None
-
-         except Exception as e:
-             print(f"Error creating conversation: {e}")
-             return None
-
-     def fetch_message_id(self, conversation_id: str):
-         """Fetch the latest message ID for a conversation."""
-         url = f"{self.url}/api/conversation/{conversation_id}"
-
-         try:
-             response = self.session.get(
-                 url,
-                 headers=self.headers,
-                 timeout=self.timeout
-             )
-             response.raise_for_status()
-
-             json_data = response.json()
-
-             if json_data.get("nodes", []) and json_data["nodes"][-1].get("type") == "error":
-                 return str(uuid.uuid4())
-
-             data = json_data["nodes"][1]["data"]
-             keys = data[data[0]["messages"]]
-             message_keys = data[keys[-1]]
-             message_id = data[message_keys["id"]]
-
-             return message_id
-
-         except Exception:
-             return str(uuid.uuid4())
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
-         return _ModelList()
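
For context, the removed C4AI client was driven through the OpenAI-style interface shown in its own docstring. Below is a minimal, hypothetical sketch of that usage: it assumes webscout 8.3.5 (where webscout/Provider/OPENAI/c4ai.py still exists) and that the returned ChatCompletion/ChatCompletionChunk objects expose the same attributes as the dataclasses constructed above.

# Hypothetical usage of the deleted provider, valid only on webscout <= 8.3.5.
from webscout.Provider.OPENAI.c4ai import C4AI

client = C4AI()

# Non-streaming call, mirroring the Usage example in the class docstring.
response = client.chat.completions.create(
    model="command-a-03-2025",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)

# Streaming call: create() yields ChatCompletionChunk objects when stream=True.
for chunk in client.chat.completions.create(
    model="command-r",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
):
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)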
webscout/Provider/OPENAI/copilot.py (deleted)
@@ -1,305 +0,0 @@
- import json
- import uuid
- import time
- from typing import List, Dict, Optional, Union, Generator, Any
- from urllib.parse import quote
- from curl_cffi.requests import Session, CurlWsFlag
-
- # Import base classes and utility structures
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from .utils import (
-     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-     ChatCompletionMessage, CompletionUsage, format_prompt, count_tokens
- )
-
- # Attempt to import LitAgent, fallback if not available
- try:
-     from webscout.litagent import LitAgent
- except ImportError:
-     pass
-
- # --- Microsoft Copilot Client ---
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'Copilot'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = None,
-         stream: bool = False,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         timeout: Optional[int] = None,
-         proxies: Optional[dict] = None,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation.
-         Mimics openai.chat.completions.create
-         """
-         # Format the entire conversation using the utility function
-         formatted_prompt = format_prompt(messages, add_special_tokens=True, include_system=True, do_continue=True)
-
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-
-         # Handle image if provided
-         image = kwargs.get("image")
-
-         if stream:
-             return self._create_stream(request_id, created_time, model, formatted_prompt, image, timeout=timeout, proxies=proxies)
-         else:
-             return self._create_non_stream(request_id, created_time, model, formatted_prompt, image, timeout=timeout, proxies=proxies)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None,
-         timeout: Optional[int] = None, proxies: Optional[dict] = None
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         original_proxies = self._client.session.proxies
-         if proxies is not None:
-             self._client.session.proxies = proxies
-         else:
-             self._client.session.proxies = {}
-         try:
-             timeout_val = timeout if timeout is not None else self._client.timeout
-             s = self._client.session
-             # Create a new conversation if needed
-             r = s.post(self._client.conversation_url, timeout=timeout_val)
-             if r.status_code != 200:
-                 raise RuntimeError(f"Failed to create conversation: {r.text}")
-             conv_id = r.json().get("id")
-
-             # Handle image upload if provided
-             images = []
-             if image:
-                 r = s.post(
-                     f"{self._client.url}/c/api/attachments",
-                     headers={"content-type": "image/jpeg"},
-                     data=image,
-                     timeout=timeout_val
-                 )
-                 if r.status_code != 200:
-                     raise RuntimeError(f"Image upload failed: {r.text}")
-                 images.append({"type": "image", "url": r.json().get("url")})
-
-             ws = s.ws_connect(self._client.websocket_url)
-             mode = "reasoning" if "Think" in model else "chat"
-             ws.send(json.dumps({
-                 "event": "send",
-                 "conversationId": conv_id,
-                 "content": images + [{"type": "text", "text": prompt_text}],
-                 "mode": mode
-             }).encode(), CurlWsFlag.TEXT)
-
-             prompt_tokens = count_tokens(prompt_text)
-             completion_tokens = 0
-             total_tokens = prompt_tokens
-             started = False
-             image_prompt = None
-             while True:
-                 try:
-                     msg = json.loads(ws.recv()[0])
-                 except Exception:
-                     break
-
-                 event = msg.get("event")
-                 if event not in ["appendText", "done", "error", "generatingImage", "imageGenerated", "suggestedFollowups", "replaceText"]:
-                     print(f"[Copilot] Unhandled event: {event} | msg: {msg}")
-
-                 if event == "appendText":
-                     started = True
-                     content = msg.get("text", "")
-                     content_tokens = count_tokens(content)
-                     completion_tokens += content_tokens
-                     total_tokens = prompt_tokens + completion_tokens
-                     delta = ChoiceDelta(
-                         content=content,
-                         role="assistant"
-                     )
-                     choice = Choice(
-                         index=0,
-                         delta=delta,
-                         finish_reason=None
-                     )
-                     chunk = ChatCompletionChunk(
-                         id=request_id,
-                         choices=[choice],
-                         created=created_time,
-                         model=model
-                     )
-                     yield chunk
-                 elif event == "replaceText":
-                     # treat as appendText for OpenAI compatibility
-                     content = msg.get("text", "")
-                     content_tokens = count_tokens(content)
-                     completion_tokens += content_tokens
-                     total_tokens = prompt_tokens + completion_tokens
-                     delta = ChoiceDelta(
-                         content=content,
-                         role="assistant"
-                     )
-                     choice = Choice(
-                         index=0,
-                         delta=delta,
-                         finish_reason=None
-                     )
-                     chunk = ChatCompletionChunk(
-                         id=request_id,
-                         choices=[choice],
-                         created=created_time,
-                         model=model
-                     )
-                     yield chunk
-                 elif event == "generatingImage":
-                     image_prompt = msg.get("prompt")
-                 elif event == "imageGenerated":
-                     # Yield a chunk with image metadata in the delta (custom extension)
-                     delta = ChoiceDelta(
-                         content=None,
-                         role=None
-                     )
-                     choice = Choice(
-                         index=0,
-                         delta=delta,
-                         finish_reason=None
-                     )
-                     chunk = ChatCompletionChunk(
-                         id=request_id,
-                         choices=[choice],
-                         created=created_time,
-                         model=model
-                     )
-                     chunk.image_url = msg.get("url")
-                     chunk.image_prompt = image_prompt
-                     chunk.image_preview = msg.get("thumbnailUrl")
-                     yield chunk
-                 elif event == "suggestedFollowups":
-                     # Yield a chunk with followups in the delta (custom extension)
-                     delta = ChoiceDelta(
-                         content=None,
-                         role=None
-                     )
-                     choice = Choice(
-                         index=0,
-                         delta=delta,
-                         finish_reason=None
-                     )
-                     chunk = ChatCompletionChunk(
-                         id=request_id,
-                         choices=[choice],
-                         created=created_time,
-                         model=model
-                     )
-                     chunk.suggested_followups = msg.get("suggestions")
-                     yield chunk
-                 elif event == "done":
-                     delta = ChoiceDelta(
-                         content=None,
-                         role=None
-                     )
-                     choice = Choice(
-                         index=0,
-                         delta=delta,
-                         finish_reason="stop"
-                     )
-                     chunk = ChatCompletionChunk(
-                         id=request_id,
-                         choices=[choice],
-                         created=created_time,
-                         model=model
-                     )
-                     yield chunk
-                     break
-                 elif event == "error":
-                     print(f"[Copilot] Error event: {msg}")
-                     raise RuntimeError(f"Copilot error: {msg}")
-
-             ws.close()
-             if not started:
-                 raise RuntimeError("No response received from Copilot")
-         except Exception as e:
-             raise RuntimeError(f"Stream error: {e}") from e
-         finally:
-             self._client.session.proxies = original_proxies
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None,
-         timeout: Optional[int] = None, proxies: Optional[dict] = None
-     ) -> ChatCompletion:
-         result = ""
-         # Pass timeout and proxies to the underlying _create_stream call
-         for chunk in self._create_stream(request_id, created_time, model, prompt_text, image, timeout=timeout, proxies=proxies):
-             if hasattr(chunk, 'choices') and chunk.choices and hasattr(chunk.choices[0], 'delta') and chunk.choices[0].delta.content:
-                 result += chunk.choices[0].delta.content
-
-         # Create the message object
-         message = ChatCompletionMessage(
-             role="assistant",
-             content=result
-         )
-
-         # Create the choice object
-         choice = Choice(
-             index=0,
-             message=message,
-             finish_reason="stop"
-         )
-
-         # Estimate token usage using count_tokens
-         prompt_tokens = count_tokens(prompt_text)
-         completion_tokens = count_tokens(result)
-         total_tokens = prompt_tokens + completion_tokens
-
-         # Create usage object
-         usage = CompletionUsage(
-             prompt_tokens=prompt_tokens,
-             completion_tokens=completion_tokens,
-             total_tokens=total_tokens
-         )
-
-         # Create the completion object
-         completion = ChatCompletion(
-             id=request_id,
-             choices=[choice],
-             created=created_time,
-             model=model,
-             usage=usage
-         )
-
-         return completion
-
- class Chat(BaseChat):
-     def __init__(self, client: 'Copilot'):
-         self.completions = Completions(client)
-
- class Copilot(OpenAICompatibleProvider):
-
-     url = "https://copilot.microsoft.com"
-     conversation_url = f"{url}/c/api/conversations"
-     websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2"
-
-     AVAILABLE_MODELS = ["Copilot", "Think Deeper"]
-
-     def __init__(self, browser: str = "chrome", tools: Optional[List] = None, **kwargs):
-         self.timeout = 900
-         self.session = Session(impersonate=browser)
-         self.session.proxies = {}
-
-         # Initialize tools
-         self.available_tools = {}
-         if tools:
-             self.register_tools(tools)
-
-         # Set up the chat interface
-         self.chat = Chat(self)
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return self.AVAILABLE_MODELS
-         return _ModelList()
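
Likewise, the removed Copilot client (webscout/Provider/OPENAI/copilot.py in 8.3.5) followed the same chat.completions pattern. A minimal, hypothetical sketch under the same assumptions; the "Think Deeper" model name comes from AVAILABLE_MODELS above, and curl_cffi is required because the client streams over a websocket (ws_connect above).

# Hypothetical usage of the deleted Copilot provider, valid only on webscout <= 8.3.5.
from webscout.Provider.OPENAI.copilot import Copilot

client = Copilot(browser="chrome")
for chunk in client.chat.completions.create(
    model="Think Deeper",  # "Think" in the name selects the "reasoning" websocket mode
    messages=[{"role": "user", "content": "Explain the CAP theorem briefly."}],
    stream=True,
):
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)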