webscout-8.2.8-py3-none-any.whl → webscout-8.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (197)
  1. webscout/AIauto.py +34 -16
  2. webscout/AIbase.py +96 -37
  3. webscout/AIutel.py +491 -87
  4. webscout/Bard.py +441 -323
  5. webscout/Extra/GitToolkit/__init__.py +10 -10
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -232
  7. webscout/Litlogger/README.md +10 -0
  8. webscout/Litlogger/__init__.py +7 -59
  9. webscout/Litlogger/formats.py +4 -0
  10. webscout/Litlogger/handlers.py +103 -0
  11. webscout/Litlogger/levels.py +13 -0
  12. webscout/Litlogger/logger.py +92 -0
  13. webscout/Provider/AISEARCH/Perplexity.py +332 -358
  14. webscout/Provider/AISEARCH/felo_search.py +9 -35
  15. webscout/Provider/AISEARCH/genspark_search.py +30 -56
  16. webscout/Provider/AISEARCH/hika_search.py +4 -16
  17. webscout/Provider/AISEARCH/iask_search.py +410 -436
  18. webscout/Provider/AISEARCH/monica_search.py +4 -30
  19. webscout/Provider/AISEARCH/scira_search.py +6 -32
  20. webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
  21. webscout/Provider/Blackboxai.py +155 -35
  22. webscout/Provider/ChatSandbox.py +2 -1
  23. webscout/Provider/Deepinfra.py +339 -339
  24. webscout/Provider/ExaChat.py +358 -358
  25. webscout/Provider/Gemini.py +169 -169
  26. webscout/Provider/GithubChat.py +1 -2
  27. webscout/Provider/Glider.py +3 -3
  28. webscout/Provider/HeckAI.py +172 -82
  29. webscout/Provider/LambdaChat.py +1 -0
  30. webscout/Provider/MCPCore.py +7 -3
  31. webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
  32. webscout/Provider/OPENAI/Cloudflare.py +38 -21
  33. webscout/Provider/OPENAI/FalconH1.py +457 -0
  34. webscout/Provider/OPENAI/FreeGemini.py +35 -18
  35. webscout/Provider/OPENAI/NEMOTRON.py +34 -34
  36. webscout/Provider/OPENAI/PI.py +427 -0
  37. webscout/Provider/OPENAI/Qwen3.py +304 -0
  38. webscout/Provider/OPENAI/README.md +952 -1253
  39. webscout/Provider/OPENAI/TwoAI.py +374 -0
  40. webscout/Provider/OPENAI/__init__.py +7 -1
  41. webscout/Provider/OPENAI/ai4chat.py +73 -63
  42. webscout/Provider/OPENAI/api.py +869 -644
  43. webscout/Provider/OPENAI/base.py +2 -0
  44. webscout/Provider/OPENAI/c4ai.py +34 -13
  45. webscout/Provider/OPENAI/chatgpt.py +575 -556
  46. webscout/Provider/OPENAI/chatgptclone.py +512 -487
  47. webscout/Provider/OPENAI/chatsandbox.py +11 -6
  48. webscout/Provider/OPENAI/copilot.py +258 -0
  49. webscout/Provider/OPENAI/deepinfra.py +327 -318
  50. webscout/Provider/OPENAI/e2b.py +140 -104
  51. webscout/Provider/OPENAI/exaai.py +420 -411
  52. webscout/Provider/OPENAI/exachat.py +448 -443
  53. webscout/Provider/OPENAI/flowith.py +7 -3
  54. webscout/Provider/OPENAI/freeaichat.py +12 -8
  55. webscout/Provider/OPENAI/glider.py +15 -8
  56. webscout/Provider/OPENAI/groq.py +5 -2
  57. webscout/Provider/OPENAI/heckai.py +311 -307
  58. webscout/Provider/OPENAI/llmchatco.py +9 -7
  59. webscout/Provider/OPENAI/mcpcore.py +18 -9
  60. webscout/Provider/OPENAI/multichat.py +7 -5
  61. webscout/Provider/OPENAI/netwrck.py +16 -11
  62. webscout/Provider/OPENAI/oivscode.py +290 -0
  63. webscout/Provider/OPENAI/opkfc.py +507 -496
  64. webscout/Provider/OPENAI/pydantic_imports.py +172 -0
  65. webscout/Provider/OPENAI/scirachat.py +29 -17
  66. webscout/Provider/OPENAI/sonus.py +308 -303
  67. webscout/Provider/OPENAI/standardinput.py +442 -433
  68. webscout/Provider/OPENAI/textpollinations.py +18 -11
  69. webscout/Provider/OPENAI/toolbaz.py +419 -413
  70. webscout/Provider/OPENAI/typefully.py +17 -10
  71. webscout/Provider/OPENAI/typegpt.py +21 -11
  72. webscout/Provider/OPENAI/uncovrAI.py +477 -462
  73. webscout/Provider/OPENAI/utils.py +90 -79
  74. webscout/Provider/OPENAI/venice.py +435 -425
  75. webscout/Provider/OPENAI/wisecat.py +387 -381
  76. webscout/Provider/OPENAI/writecream.py +166 -163
  77. webscout/Provider/OPENAI/x0gpt.py +26 -37
  78. webscout/Provider/OPENAI/yep.py +384 -356
  79. webscout/Provider/PI.py +2 -1
  80. webscout/Provider/TTI/README.md +55 -101
  81. webscout/Provider/TTI/__init__.py +4 -9
  82. webscout/Provider/TTI/aiarta.py +365 -0
  83. webscout/Provider/TTI/artbit.py +0 -0
  84. webscout/Provider/TTI/base.py +64 -0
  85. webscout/Provider/TTI/fastflux.py +200 -0
  86. webscout/Provider/TTI/magicstudio.py +201 -0
  87. webscout/Provider/TTI/piclumen.py +203 -0
  88. webscout/Provider/TTI/pixelmuse.py +225 -0
  89. webscout/Provider/TTI/pollinations.py +221 -0
  90. webscout/Provider/TTI/utils.py +11 -0
  91. webscout/Provider/TTS/__init__.py +2 -1
  92. webscout/Provider/TTS/base.py +159 -159
  93. webscout/Provider/TTS/openai_fm.py +129 -0
  94. webscout/Provider/TextPollinationsAI.py +308 -308
  95. webscout/Provider/TwoAI.py +239 -44
  96. webscout/Provider/UNFINISHED/Youchat.py +330 -330
  97. webscout/Provider/UNFINISHED/puterjs.py +635 -0
  98. webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
  99. webscout/Provider/Writecream.py +246 -246
  100. webscout/Provider/__init__.py +2 -2
  101. webscout/Provider/ai4chat.py +33 -8
  102. webscout/Provider/granite.py +41 -6
  103. webscout/Provider/koala.py +169 -169
  104. webscout/Provider/oivscode.py +309 -0
  105. webscout/Provider/samurai.py +3 -2
  106. webscout/Provider/scnet.py +1 -0
  107. webscout/Provider/typegpt.py +3 -3
  108. webscout/Provider/uncovr.py +368 -368
  109. webscout/client.py +70 -0
  110. webscout/litprinter/__init__.py +58 -58
  111. webscout/optimizers.py +419 -419
  112. webscout/scout/README.md +3 -1
  113. webscout/scout/core/crawler.py +134 -64
  114. webscout/scout/core/scout.py +148 -109
  115. webscout/scout/element.py +106 -88
  116. webscout/swiftcli/Readme.md +323 -323
  117. webscout/swiftcli/plugins/manager.py +9 -2
  118. webscout/version.py +1 -1
  119. webscout/zeroart/__init__.py +134 -134
  120. webscout/zeroart/effects.py +100 -100
  121. webscout/zeroart/fonts.py +1238 -1238
  122. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
  123. webscout-8.3.dist-info/RECORD +290 -0
  124. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  125. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
  126. webscout/Litlogger/Readme.md +0 -175
  127. webscout/Litlogger/core/__init__.py +0 -6
  128. webscout/Litlogger/core/level.py +0 -23
  129. webscout/Litlogger/core/logger.py +0 -165
  130. webscout/Litlogger/handlers/__init__.py +0 -12
  131. webscout/Litlogger/handlers/console.py +0 -33
  132. webscout/Litlogger/handlers/file.py +0 -143
  133. webscout/Litlogger/handlers/network.py +0 -173
  134. webscout/Litlogger/styles/__init__.py +0 -7
  135. webscout/Litlogger/styles/colors.py +0 -249
  136. webscout/Litlogger/styles/formats.py +0 -458
  137. webscout/Litlogger/styles/text.py +0 -87
  138. webscout/Litlogger/utils/__init__.py +0 -6
  139. webscout/Litlogger/utils/detectors.py +0 -153
  140. webscout/Litlogger/utils/formatters.py +0 -200
  141. webscout/Provider/ChatGPTGratis.py +0 -194
  142. webscout/Provider/TTI/AiForce/README.md +0 -159
  143. webscout/Provider/TTI/AiForce/__init__.py +0 -22
  144. webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
  145. webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
  146. webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
  147. webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
  148. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
  149. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
  150. webscout/Provider/TTI/ImgSys/README.md +0 -174
  151. webscout/Provider/TTI/ImgSys/__init__.py +0 -23
  152. webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
  153. webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
  154. webscout/Provider/TTI/MagicStudio/README.md +0 -101
  155. webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
  156. webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
  157. webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
  158. webscout/Provider/TTI/Nexra/README.md +0 -155
  159. webscout/Provider/TTI/Nexra/__init__.py +0 -22
  160. webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
  161. webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
  162. webscout/Provider/TTI/PollinationsAI/README.md +0 -146
  163. webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
  164. webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
  165. webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
  166. webscout/Provider/TTI/aiarta/README.md +0 -134
  167. webscout/Provider/TTI/aiarta/__init__.py +0 -2
  168. webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
  169. webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
  170. webscout/Provider/TTI/artbit/README.md +0 -100
  171. webscout/Provider/TTI/artbit/__init__.py +0 -22
  172. webscout/Provider/TTI/artbit/async_artbit.py +0 -155
  173. webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
  174. webscout/Provider/TTI/fastflux/README.md +0 -129
  175. webscout/Provider/TTI/fastflux/__init__.py +0 -22
  176. webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
  177. webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
  178. webscout/Provider/TTI/huggingface/README.md +0 -114
  179. webscout/Provider/TTI/huggingface/__init__.py +0 -22
  180. webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
  181. webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
  182. webscout/Provider/TTI/piclumen/README.md +0 -161
  183. webscout/Provider/TTI/piclumen/__init__.py +0 -23
  184. webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
  185. webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
  186. webscout/Provider/TTI/pixelmuse/README.md +0 -79
  187. webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
  188. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
  189. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
  190. webscout/Provider/TTI/talkai/README.md +0 -139
  191. webscout/Provider/TTI/talkai/__init__.py +0 -4
  192. webscout/Provider/TTI/talkai/async_talkai.py +0 -229
  193. webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
  194. webscout/Provider/UNFINISHED/oivscode.py +0 -351
  195. webscout-8.2.8.dist-info/RECORD +0 -334
  196. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  197. {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/chatsandbox.py

@@ -10,7 +10,8 @@ from .utils import (
     ChatCompletionMessage,
     ChoiceDelta,
     CompletionUsage,
-    format_prompt
+    format_prompt,
+    count_tokens
 )
 import requests
 
@@ -32,6 +33,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -69,13 +72,15 @@ class Completions(BaseCompletions):
         }
         session = requests.Session()
         session.headers.update(headers)
+        session.proxies = proxies if proxies is not None else {}
+
         def for_stream():
             try:
                 response = session.post(
                     url,
                     json=payload,
                     stream=True,
-                    timeout=30
+                    timeout=timeout if timeout is not None else 30
                 )
                 response.raise_for_status()
                 streaming_text = ""
@@ -115,7 +120,7 @@ class Completions(BaseCompletions):
             response = session.post(
                 url,
                 json=payload,
-                timeout=30
+                timeout=timeout if timeout is not None else 30
             )
             response.raise_for_status()
             text = response.text
@@ -124,8 +129,8 @@ class Completions(BaseCompletions):
                 content = data.get("reasoning_content", text)
             except Exception:
                 content = text
-            prompt_tokens = len(question) // 4
-            completion_tokens = len(content) // 4
+            prompt_tokens = count_tokens(question)
+            completion_tokens = count_tokens(content)
             total_tokens = prompt_tokens + completion_tokens
             usage = CompletionUsage(
                 prompt_tokens=prompt_tokens,
@@ -151,7 +156,7 @@ class Chat(BaseChat):
         self.completions = Completions(client)
 
 class ChatSandbox(OpenAICompatibleProvider):
-    AVAILABLE_MODELS = ["openai", "deepseek", "llama", "gemini", "mistral-large"]
+    AVAILABLE_MODELS = ["openai", "deepseek", "llama", "gemini", "mistral-large", "deepseek-r1", "deepseek-r1-full", "gemini-thinking", "openai-o1-mini", "llama", "mistral", "gemma-3"]
     chat: Chat
     def __init__(self):
         self.chat = Chat(self)
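
For context, a minimal usage sketch of the new per-request timeout and proxies options introduced above, assuming the OpenAI-compatible interface shown in the diff (the import path, model name, and response shape are inferred from the diff, not verified against the release):

    # Hypothetical usage of the updated ChatSandbox provider (paths/fields inferred from the diff)
    from webscout.Provider.OPENAI.chatsandbox import ChatSandbox

    client = ChatSandbox()
    response = client.chat.completions.create(
        model="deepseek-r1",                            # one of the newly listed AVAILABLE_MODELS
        messages=[{"role": "user", "content": "Hello!"}],
        timeout=60,                                     # per-request timeout; falls back to 30s when omitted
        proxies={"https": "http://127.0.0.1:8080"},     # per-request proxy mapping applied to the session
    )
    print(response.choices[0].message.content)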
webscout/Provider/OPENAI/copilot.py (new file)

@@ -0,0 +1,258 @@
+import json
+import uuid
+import time
+from typing import List, Dict, Optional, Union, Generator, Any
+from urllib.parse import quote
+from curl_cffi.requests import Session, CurlWsFlag
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage, format_prompt, count_tokens
+)
+
+# Attempt to import LitAgent, fallback if not available
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    pass
+
+# --- Microsoft Copilot Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'Copilot'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Format the entire conversation using the utility function
+        formatted_prompt = format_prompt(messages, add_special_tokens=True, include_system=True, do_continue=True)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        # Handle image if provided
+        image = kwargs.get("image")
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, formatted_prompt, image, timeout=timeout, proxies=proxies)
+        else:
+            return self._create_non_stream(request_id, created_time, model, formatted_prompt, image, timeout=timeout, proxies=proxies)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None,
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
+        try:
+            timeout_val = timeout if timeout is not None else self._client.timeout
+            s = self._client.session
+            # Create a new conversation if needed
+            r = s.post(self._client.conversation_url, timeout=timeout_val)
+            if r.status_code != 200:
+                raise RuntimeError(f"Failed to create conversation: {r.text}")
+            conv_id = r.json().get("id")
+
+            # Handle image upload if provided
+            images = []
+            if image:
+                r = s.post(
+                    f"{self._client.url}/c/api/attachments",
+                    headers={"content-type": "image/jpeg"},
+                    data=image,
+                    timeout=timeout_val
+                )
+                if r.status_code != 200:
+                    raise RuntimeError(f"Image upload failed: {r.text}")
+                images.append({"type": "image", "url": r.json().get("url")})
+
+            # Connect to websocket
+            # Note: ws_connect might not use timeout in the same way as POST/GET
+            ws = s.ws_connect(self._client.websocket_url)
+
+            # Use model to set mode ("reasoning" for Think Deeper)
+            mode = "reasoning" if "Think" in model else "chat"
+
+            # Send the message to Copilot
+            ws.send(json.dumps({
+                "event": "send",
+                "conversationId": conv_id,
+                "content": images + [{"type": "text", "text": prompt_text}],
+                "mode": mode
+            }).encode(), CurlWsFlag.TEXT)
+
+            # Track token usage using count_tokens
+            prompt_tokens = count_tokens(prompt_text)
+            completion_tokens = 0
+            total_tokens = prompt_tokens
+
+            started = False
+            while True:
+                try:
+                    msg = json.loads(ws.recv()[0])
+                except Exception:
+                    break
+
+                if msg.get("event") == "appendText":
+                    started = True
+                    content = msg.get("text", "")
+
+                    # Update token counts using count_tokens
+                    content_tokens = count_tokens(content)
+                    completion_tokens += content_tokens
+                    total_tokens = prompt_tokens + completion_tokens
+
+                    # Create the delta object
+                    delta = ChoiceDelta(
+                        content=content,
+                        role="assistant"
+                    )
+
+                    # Create the choice object
+                    choice = Choice(
+                        index=0,
+                        delta=delta,
+                        finish_reason=None
+                    )
+
+                    # Create the chunk object
+                    chunk = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model
+                    )
+
+                    yield chunk
+                elif msg.get("event") == "done":
+                    # Final chunk with finish_reason
+                    delta = ChoiceDelta(
+                        content=None,
+                        role=None
+                    )
+
+                    choice = Choice(
+                        index=0,
+                        delta=delta,
+                        finish_reason="stop"
+                    )
+
+                    chunk = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model
+                    )
+
+                    yield chunk
+                    break
+                elif msg.get("event") == "error":
+                    raise RuntimeError(f"Copilot error: {msg}")
+
+            ws.close()
+
+            if not started:
+                raise RuntimeError("No response received from Copilot")
+
+        except Exception as e:
+            raise RuntimeError(f"Stream error: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None,
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
+    ) -> ChatCompletion:
+        result = ""
+        # Pass timeout and proxies to the underlying _create_stream call
+        for chunk in self._create_stream(request_id, created_time, model, prompt_text, image, timeout=timeout, proxies=proxies):
+            if hasattr(chunk, 'choices') and chunk.choices and hasattr(chunk.choices[0], 'delta') and chunk.choices[0].delta.content:
+                result += chunk.choices[0].delta.content
+
+        # Create the message object
+        message = ChatCompletionMessage(
+            role="assistant",
+            content=result
+        )
+
+        # Create the choice object
+        choice = Choice(
+            index=0,
+            message=message,
+            finish_reason="stop"
+        )
+
+        # Estimate token usage using count_tokens
+        prompt_tokens = count_tokens(prompt_text)
+        completion_tokens = count_tokens(result)
+        total_tokens = prompt_tokens + completion_tokens
+
+        # Create usage object
+        usage = CompletionUsage(
+            prompt_tokens=prompt_tokens,
+            completion_tokens=completion_tokens,
+            total_tokens=total_tokens
+        )
+
+        # Create the completion object
+        completion = ChatCompletion(
+            id=request_id,
+            choices=[choice],
+            created=created_time,
+            model=model,
+            usage=usage
+        )
+
+        return completion
+
+class Chat(BaseChat):
+    def __init__(self, client: 'Copilot'):
+        self.completions = Completions(client)
+
+class Copilot(OpenAICompatibleProvider):
+
+    url = "https://copilot.microsoft.com"
+    conversation_url = f"{url}/c/api/conversations"
+    websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2"
+
+    AVAILABLE_MODELS = ["Copilot", "Think Deeper"]
+
+    def __init__(self, browser: str = "chrome", tools: Optional[List] = None, **kwargs):
+        self.timeout = 900
+        self.session = Session(impersonate=browser)
+        self.session.proxies = {}
+
+        # Initialize tools
+        self.available_tools = {}
+        if tools:
+            self.register_tools(tools)
+
+        # Set up the chat interface
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return self.AVAILABLE_MODELS
+        return _ModelList()
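
For context, a minimal usage sketch of the new Copilot provider added above, assuming the streaming interface shown in the diff (the import path and chunk shape are inferred from the diff, not verified against the release):

    # Hypothetical usage of the new Copilot provider (paths/fields inferred from the diff)
    from webscout.Provider.OPENAI.copilot import Copilot

    client = Copilot(browser="chrome")
    stream = client.chat.completions.create(
        model="Think Deeper",   # selects "reasoning" mode; "Copilot" uses the plain chat mode
        messages=[{"role": "user", "content": "Summarize the webscout 8.3 changes."}],
        stream=True,
        timeout=120,            # per-request override of the 900s default
    )
    for chunk in stream:
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="", flush=True)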