webscout-8.3.6-py3-none-any.whl → webscout-8.3.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (130)
  1. webscout/AIutel.py +2 -0
  2. webscout/Provider/AISEARCH/__init__.py +18 -11
  3. webscout/Provider/AISEARCH/scira_search.py +3 -1
  4. webscout/Provider/Aitopia.py +2 -3
  5. webscout/Provider/Andi.py +3 -3
  6. webscout/Provider/ChatGPTClone.py +1 -1
  7. webscout/Provider/ChatSandbox.py +1 -0
  8. webscout/Provider/Cloudflare.py +1 -1
  9. webscout/Provider/Cohere.py +1 -0
  10. webscout/Provider/Deepinfra.py +7 -10
  11. webscout/Provider/ExaAI.py +1 -1
  12. webscout/Provider/ExaChat.py +1 -80
  13. webscout/Provider/Flowith.py +1 -1
  14. webscout/Provider/Gemini.py +7 -5
  15. webscout/Provider/GeminiProxy.py +1 -0
  16. webscout/Provider/GithubChat.py +3 -1
  17. webscout/Provider/Groq.py +1 -1
  18. webscout/Provider/HeckAI.py +8 -4
  19. webscout/Provider/Jadve.py +23 -38
  20. webscout/Provider/K2Think.py +308 -0
  21. webscout/Provider/Koboldai.py +8 -186
  22. webscout/Provider/LambdaChat.py +2 -4
  23. webscout/Provider/Nemotron.py +3 -4
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OLLAMA.py +1 -0
  26. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  27. webscout/Provider/OPENAI/FalconH1.py +2 -7
  28. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  29. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  30. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  31. webscout/Provider/OPENAI/PI.py +5 -4
  32. webscout/Provider/OPENAI/Qwen3.py +2 -3
  33. webscout/Provider/OPENAI/TogetherAI.py +2 -2
  34. webscout/Provider/OPENAI/TwoAI.py +3 -4
  35. webscout/Provider/OPENAI/__init__.py +17 -58
  36. webscout/Provider/OPENAI/ai4chat.py +313 -303
  37. webscout/Provider/OPENAI/base.py +9 -29
  38. webscout/Provider/OPENAI/chatgpt.py +7 -2
  39. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  40. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  41. webscout/Provider/OPENAI/deepinfra.py +6 -6
  42. webscout/Provider/OPENAI/heckai.py +4 -1
  43. webscout/Provider/OPENAI/netwrck.py +1 -0
  44. webscout/Provider/OPENAI/scirachat.py +6 -0
  45. webscout/Provider/OPENAI/textpollinations.py +3 -11
  46. webscout/Provider/OPENAI/toolbaz.py +14 -11
  47. webscout/Provider/OpenGPT.py +1 -1
  48. webscout/Provider/Openai.py +150 -402
  49. webscout/Provider/PI.py +1 -0
  50. webscout/Provider/Perplexitylabs.py +1 -2
  51. webscout/Provider/QwenLM.py +107 -89
  52. webscout/Provider/STT/__init__.py +17 -2
  53. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  54. webscout/Provider/StandardInput.py +1 -1
  55. webscout/Provider/TTI/__init__.py +18 -12
  56. webscout/Provider/TTS/__init__.py +18 -10
  57. webscout/Provider/TeachAnything.py +1 -0
  58. webscout/Provider/TextPollinationsAI.py +5 -12
  59. webscout/Provider/TogetherAI.py +86 -87
  60. webscout/Provider/TwoAI.py +53 -309
  61. webscout/Provider/TypliAI.py +2 -1
  62. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  63. webscout/Provider/Venice.py +2 -1
  64. webscout/Provider/VercelAI.py +1 -0
  65. webscout/Provider/WiseCat.py +2 -1
  66. webscout/Provider/WrDoChat.py +2 -1
  67. webscout/Provider/__init__.py +18 -86
  68. webscout/Provider/ai4chat.py +1 -1
  69. webscout/Provider/akashgpt.py +7 -10
  70. webscout/Provider/cerebras.py +115 -9
  71. webscout/Provider/chatglm.py +170 -83
  72. webscout/Provider/cleeai.py +1 -2
  73. webscout/Provider/deepseek_assistant.py +1 -1
  74. webscout/Provider/elmo.py +1 -1
  75. webscout/Provider/geminiapi.py +1 -1
  76. webscout/Provider/granite.py +1 -1
  77. webscout/Provider/hermes.py +1 -3
  78. webscout/Provider/julius.py +1 -0
  79. webscout/Provider/learnfastai.py +1 -1
  80. webscout/Provider/llama3mitril.py +1 -1
  81. webscout/Provider/llmchat.py +1 -1
  82. webscout/Provider/llmchatco.py +1 -1
  83. webscout/Provider/meta.py +3 -3
  84. webscout/Provider/oivscode.py +2 -2
  85. webscout/Provider/scira_chat.py +51 -124
  86. webscout/Provider/searchchat.py +1 -0
  87. webscout/Provider/sonus.py +1 -1
  88. webscout/Provider/toolbaz.py +15 -12
  89. webscout/Provider/turboseek.py +31 -22
  90. webscout/Provider/typefully.py +2 -1
  91. webscout/Provider/x0gpt.py +1 -0
  92. webscout/Provider/yep.py +2 -1
  93. webscout/tempid.py +6 -0
  94. webscout/version.py +1 -1
  95. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
  96. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
  97. webscout/Provider/AllenAI.py +0 -440
  98. webscout/Provider/Blackboxai.py +0 -793
  99. webscout/Provider/FreeGemini.py +0 -250
  100. webscout/Provider/GptOss.py +0 -207
  101. webscout/Provider/Hunyuan.py +0 -283
  102. webscout/Provider/Kimi.py +0 -445
  103. webscout/Provider/MCPCore.py +0 -322
  104. webscout/Provider/MiniMax.py +0 -207
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  106. webscout/Provider/OPENAI/MiniMax.py +0 -298
  107. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  108. webscout/Provider/OPENAI/copilot.py +0 -321
  109. webscout/Provider/OPENAI/gptoss.py +0 -288
  110. webscout/Provider/OPENAI/kimi.py +0 -469
  111. webscout/Provider/OPENAI/mcpcore.py +0 -431
  112. webscout/Provider/OPENAI/multichat.py +0 -378
  113. webscout/Provider/Reka.py +0 -214
  114. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  115. webscout/Provider/asksteve.py +0 -220
  116. webscout/Provider/copilot.py +0 -441
  117. webscout/Provider/freeaichat.py +0 -294
  118. webscout/Provider/koala.py +0 -182
  119. webscout/Provider/lmarena.py +0 -198
  120. webscout/Provider/monochat.py +0 -275
  121. webscout/Provider/multichat.py +0 -375
  122. webscout/Provider/scnet.py +0 -244
  123. webscout/Provider/talkai.py +0 -194
  124. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  125. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  126. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  127. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  128. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  129. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  130. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
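
Several of the entries above are renames or moves rather than edits, so code that imports those provider modules by path needs the new locations. A minimal, hedged compatibility sketch, using only the module paths visible in entries 29, 53, and 62 of the list (nothing is assumed about what each module exports):

```python
import importlib

# Old-to-new module paths taken from the rename entries above; fall back to the
# pre-8.3.7 path when the new one is not present in the installed version.
def load_provider_module(new_path: str, old_path: str):
    try:
        return importlib.import_module(new_path)
    except ModuleNotFoundError:
        return importlib.import_module(old_path)

sambanova = load_provider_module("webscout.Provider.Sambanova", "webscout.Provider.Llama3")
k2think = load_provider_module("webscout.Provider.OPENAI.K2Think", "webscout.Provider.OPENAI.monochat")
gizai = load_provider_module("webscout.Provider.UNFINISHED.GizAI", "webscout.Provider.GizAI")
```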
webscout/Provider/asksteve.py (file 115 in the list above; removed entirely)
@@ -1,220 +0,0 @@
- from typing import Any, Dict, Optional, Union
- from curl_cffi import CurlError
- from curl_cffi.requests import Session
- from webscout import exceptions
- from webscout.AIutel import Optimizers
- from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
- from webscout.AIbase import Provider
- from webscout.litagent import LitAgent
-
- class AskSteve(Provider):
-     """
-     A class to interact with the AskSteve API.
-     """
-     AVAILABLE_MODELS = ["Gemini"]
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 600,
-         timeout: int = 30,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-     ) -> None:
-         """Instantiates AskSteve
-
-         Args:
-             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-             timeout (int, optional): Http request timeout. Defaults to 30.
-             intro (str, optional): Conversation introductory prompt. Defaults to None.
-             filepath (str, optional): Path to file containing conversation history. Defaults to None.
-             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-             proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             system_prompt (str, optional): System prompt for AskSteve. Defaults to the provided string.
-         """
-         self.session = Session() # Use curl_cffi Session
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.api_endpoint = "https://quickstart.asksteve.to/quickStartRequest"
-         self.timeout = timeout
-         self.last_response = {}
-         self.headers = {
-             "accept": "*/*",
-             "accept-encoding": "gzip, deflate, br, zstd",
-             "accept-language": "en-US,en;q=0.9",
-             "content-type": "text/plain;charset=UTF-8",
-             "origin": "chrome-extension://gldebcpkoojijledacjeboaehblhfbjg",
-             "priority": "u=1, i",
-             "sec-fetch-dest": "empty",
-             "sec-fetch-mode": "cors",
-             "sec-fetch-site": "none",
-             "sec-fetch-storage-access": "active",
-             "user-agent": LitAgent().random(),
-         }
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         self.session.headers.update(self.headers)
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-         self.session.proxies = proxies # Assign proxies directly
-     @staticmethod
-     def _asksteve_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-         """Extracts content from AskSteve JSON response."""
-         if isinstance(chunk, dict) and "candidates" in chunk and len(chunk["candidates"]) > 0:
-             parts = chunk["candidates"][0].get("content", {}).get("parts", [])
-             if parts and isinstance(parts[0].get("text"), str):
-                 return parts[0]["text"]
-         return None
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = False,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> dict:
-         """Chat with AI
-
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             raw (bool, optional): Stream back raw response as received. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             dict : {}
-         ```json
-         {
-             "text" : "How may I assist you today?"
-         }
-         ```
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(
-                     f"Optimizer is not one of {self.__available_optimizers}"
-                 )
-
-         payload = {
-             "key": "asksteve",
-             "prompt": conversation_prompt
-         }
-
-
-         # This API doesn't stream, so we process the full response
-         try:
-             response = self.session.post(
-                 self.api_endpoint,
-                 headers=self.headers,
-                 json=payload,
-                 stream=False, # API doesn't stream
-                 timeout=self.timeout,
-                 impersonate="chrome120" # Add impersonate
-             )
-             response.raise_for_status()
-             response_text_raw = response.text # Get raw text
-
-             # Process the full JSON text using sanitize_stream
-             processed_stream = sanitize_stream(
-                 data=response_text_raw,
-                 to_json=True, # Parse the whole text as JSON
-                 intro_value=None,
-                 content_extractor=self._asksteve_extractor, # Use the specific extractor
-                 yield_raw_on_error=False
-             )
-             # Extract the single result
-             text = next(processed_stream, None)
-             text = text if isinstance(text, str) else "" # Ensure it's a string
-
-             self.last_response.update(dict(text=text))
-             self.conversation.update_chat_history(
-                 prompt, self.get_message(self.last_response)
-             )
-             # Always return a dict for consistency
-             return {"text": text} if raw else self.last_response
-
-         except CurlError as e:
-             raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
-         except Exception as e: # Catch other potential errors
-             raise exceptions.FailedToGenerateResponseError(f"Failed to get response ({type(e).__name__}): {e}") from e
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-     ) -> str:
-         """Generate response `str`
-         Args:
-             prompt (str): Prompt to be send.
-             stream (bool, optional): Flag for streaming response. Defaults to False.
-             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-         Returns:
-             str: Response generated
-         """
-
-         response_data = self.ask(
-             prompt,
-             stream=False, # Always False for this API
-             raw=False, # Get the dict back
-             optimizer=optimizer,
-             conversationally=conversationally,
-         )
-         if stream:
-             def stream_wrapper():
-                 yield self.get_message(response_data)
-             return stream_wrapper()
-         else:
-             return self.get_message(response_data)
-
-     def get_message(self, response) -> str:
-         """Retrieves message only from response
-
-         Args:
-             response (dict or str): Response generated by `self.ask` or a string
-
-         Returns:
-             str: Message extracted
-         """
-         if isinstance(response, dict):
-             return response.get("text", "") # Use .get for safety
-         elif isinstance(response, str):
-             return response
-         else:
-             raise TypeError(f"Unsupported response type: {type(response)}")
-
-
- if __name__ == "__main__":
-     from rich import print
-     ai = AskSteve()
-     response = ai.chat("write a short poem about AI", stream=True)
-     for chunk in response:
-         print(chunk, end="", flush=True)
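
For reference, the removed AskSteve provider wrapped a non-streaming HTTP endpoint behind webscout's streaming-style interface: chat(..., stream=True) returns a one-item generator rather than a true token stream. A minimal, hypothetical caller-side sketch for webscout 8.3.6 and earlier (where the module still existed) shows why that uniformity is convenient:

```python
# Hypothetical caller code for webscout <= 8.3.6, where Provider/asksteve.py still shipped.
# chat(..., stream=True) yields a single chunk because the backend cannot stream, so the
# same consumption pattern works for streaming and non-streaming providers alike.
from webscout.Provider.asksteve import AskSteve

ai = AskSteve(timeout=30)
text = "".join(ai.chat("Explain HTTP keep-alive in one sentence.", stream=True))
print(text)
```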
webscout/Provider/copilot.py (file 116 in the list above; removed entirely)
@@ -1,441 +0,0 @@
- import asyncio
- import base64
- import json
- import os
- from typing import Any, Dict, Generator, Union
- from urllib.parse import quote
-
- # Import trio before curl_cffi to prevent eventlet socket monkey-patching conflicts
- # See: https://github.com/python-trio/trio/issues/3015
- try:
-     import trio # noqa: F401
- except ImportError:
-     pass # trio is optional, ignore if not available
- from curl_cffi.requests import CurlWsFlag, Session
-
- from webscout import exceptions
- from webscout.AIbase import Provider
- from webscout.AIutel import AwesomePrompts, Conversation, Optimizers
- from webscout.litagent import LitAgent
-
- try:
-     has_curl_cffi = True
- except ImportError:
-     has_curl_cffi = False
-
- try:
-     import nodriver
-     has_nodriver = True
- except ImportError:
-     has_nodriver = False
-
-
- class NoValidHarFileError(Exception):
-     pass
-
-
- class CopilotConversation:
-     conversation_id: str
-
-     def __init__(self, conversation_id: str):
-         self.conversation_id = conversation_id
-
-
- class Copilot(Provider):
-     """
-     A class to interact with the Microsoft Copilot API.
-     """
-
-     label = "Microsoft Copilot"
-     url = "https://copilot.microsoft.com"
-     websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2"
-     conversation_url = f"{url}/c/api/conversations"
-     AVAILABLE_MODELS = ["Copilot", "Think Deeper", "Smart"]
-     MODEL_ALIASES = {
-         "gpt-4o": "Copilot",
-         "o4-mini": "Think Deeper",
-         "gpt-5": "Smart",
-     }
-     _access_token: str = None
-     _cookies: dict = None
-
-     def __init__(
-         self,
-         is_conversation: bool = True,
-         max_tokens: int = 2000,
-         timeout: int = 900,
-         intro: str = None,
-         filepath: str = None,
-         update_file: bool = True,
-         proxies: dict = {},
-         history_offset: int = 10250,
-         act: str = None,
-         model: str = "Copilot"
-     ):
-         """Initializes the Copilot API client."""
-         # Map alias to real model name if needed
-         real_model = self.MODEL_ALIASES.get(model, model)
-         if real_model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-         self.model = real_model
-
-         # Use LitAgent for user-agent
-         self.headers = {
-             'User-Agent': LitAgent().random(),
-             'Accept-Language': 'en-US,en;q=0.9',
-             'Connection': 'keep-alive',
-             'Content-Type': 'application/json',
-             'Origin': self.url,
-             'Referer': f'{self.url}/',
-             'Sec-Fetch-Dest': 'empty',
-             'Sec-Fetch-Mode': 'cors',
-             'Sec-Fetch-Site': 'same-origin',
-         }
-
-         self.is_conversation = is_conversation
-         self.max_tokens_to_sample = max_tokens
-         self.timeout = timeout
-         self.last_response = {}
-         self.model = model
-         self.proxies = proxies
-
-         self.__available_optimizers = (
-             method
-             for method in dir(Optimizers)
-             if callable(getattr(Optimizers, method)) and not method.startswith("__")
-         )
-         Conversation.intro = (
-             AwesomePrompts().get_act(
-                 act, raise_not_found=True, default=None, case_insensitive=True
-             )
-             if act
-             else intro or Conversation.intro
-         )
-
-         self.conversation = Conversation(
-             is_conversation, self.max_tokens_to_sample, filepath, update_file
-         )
-         self.conversation.history_offset = history_offset
-
-     def ask(
-         self,
-         prompt: str,
-         stream: bool = True,
-         raw: bool = False,
-         optimizer: str = None,
-         conversationally: bool = False,
-         images=None,
-         api_key: str = None,
-         **kwargs
-     ) -> Union[Dict[str, Any], Generator]:
-         """
-         Enhanced Copilot.ask with:
-         - return_conversation support
-         - multiple image upload
-         - event dispatch for websocket events
-         - suggested followups and metadata
-         - improved error handling
-         """
-         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-         if optimizer:
-             if optimizer in self.__available_optimizers:
-                 conversation_prompt = getattr(Optimizers, optimizer)(
-                     conversation_prompt if conversationally else prompt
-                 )
-             else:
-                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-         def handle_event(msg, state):
-             event = msg.get("event")
-             if event == "appendText":
-                 state["is_started"] = True
-                 content = msg.get("text")
-                 state["streaming_text"] += content
-                 resp = {"text": content}
-                 return resp if raw else resp
-             elif event == "generatingImage":
-                 state["image_prompt"] = msg.get("prompt")
-             elif event == "imageGenerated":
-                 return {"type": "image", "url": msg.get("url"), "prompt": state.get("image_prompt"), "preview": msg.get("thumbnailUrl")}
-             elif event == "done":
-                 state["done"] = True
-             elif event == "suggestedFollowups":
-                 return {"type": "suggested_followups", "suggestions": msg.get("suggestions")}
-             elif event == "replaceText":
-                 content = msg.get("text")
-                 state["streaming_text"] += content
-                 resp = {"text": content}
-                 return resp if raw else resp
-             elif event == "error":
-                 raise exceptions.FailedToGenerateResponseError(f"Error: {msg}")
-             elif event not in ["received", "startMessage", "citation", "partCompleted"]:
-                 pass
-             return None
-
-         def for_stream():
-             try:
-                 if not has_curl_cffi:
-                     raise Exception('Install or update "curl_cffi" package | pip install -U curl_cffi')
-
-                 websocket_url = self.websocket_url
-                 headers = None
-
-                 # Auth logic (token/cookies)
-                 if images is not None or api_key is not None:
-                     if api_key is not None:
-                         self._access_token = api_key
-                     if self._access_token is None:
-                         try:
-                             self._access_token, self._cookies = readHAR(self.url)
-                         except NoValidHarFileError as h:
-                             if has_nodriver:
-                                 yield {"type": "login", "provider": self.label, "url": os.environ.get("webscout_login", "")}
-                                 self._access_token, self._cookies = asyncio.run(get_access_token_and_cookies(self.url, self.proxies.get("https")))
-                             else:
-                                 raise h
-                     websocket_url = f"{websocket_url}&accessToken={quote(self._access_token)}"
-                     headers = {"authorization": f"Bearer {self._access_token}"}
-
-                 with Session(
-                     timeout=self.timeout,
-                     proxy=self.proxies.get("https"),
-                     impersonate="chrome",
-                     headers=headers,
-                     cookies=self._cookies,
-                 ) as session:
-                     if self._access_token is not None:
-                         self._cookies = session.cookies.jar if hasattr(session.cookies, "jar") else session.cookies
-
-                     response = session.get(f"{self.url}/c/api/user")
-                     if response.status_code == 401:
-                         raise exceptions.AuthenticationError("Status 401: Invalid access token")
-                     if response.status_code != 200:
-                         raise exceptions.APIConnectionError(f"Status {response.status_code}: {response.text}")
-                     user = response.json().get('firstName')
-                     if user is None:
-                         self._access_token = None
-
-                     # Conversation management
-                     conversation = kwargs.get("conversation", None)
-                     if conversation is None:
-                         response = session.post(self.conversation_url)
-                         if response.status_code != 200:
-                             raise exceptions.APIConnectionError(f"Status {response.status_code}: {response.text}")
-                         conversation_id = response.json().get("id")
-                         conversation = CopilotConversation(conversation_id)
-                         if kwargs.get("return_conversation", False):
-                             yield conversation
-                     else:
-                         conversation_id = conversation.conversation_id
-
-                     # Multiple image upload
-                     uploaded_images = []
-                     if images is not None:
-                         for image_tuple in images:
-                             image = image_tuple[0] if isinstance(image_tuple, (tuple, list)) else image_tuple
-                             # Convert image to bytes if needed
-                             if isinstance(image, str):
-                                 if image.startswith("data:"):
-                                     header, encoded = image.split(",", 1)
-                                     data = base64.b64decode(encoded)
-                                 else:
-                                     with open(image, "rb") as f:
-                                         data = f.read()
-                             else:
-                                 data = image
-                             # Get content type
-                             content_type = "image/jpeg"
-                             if data[:2] == b'\xff\xd8':
-                                 content_type = "image/jpeg"
-                             elif data[:8] == b'\x89PNG\r\n\x1a\n':
-                                 content_type = "image/png"
-                             elif data[:6] in (b'GIF87a', b'GIF89a'):
-                                 content_type = "image/gif"
-                             elif data[:2] in (b'BM', b'BA'):
-                                 content_type = "image/bmp"
-                             response = session.post(
-                                 f"{self.url}/c/api/attachments",
-                                 headers={"content-type": content_type},
-                                 data=data
-                             )
-                             if response.status_code != 200:
-                                 raise exceptions.APIConnectionError(f"Status {response.status_code}: {response.text}")
-                             uploaded_images.append({"type": "image", "url": response.json().get("url")})
-
-                     # WebSocket connection
-                     wss = session.ws_connect(websocket_url)
-                     wss.send(json.dumps({"event": "setOptions", "supportedCards": ["weather", "local", "image", "sports", "video", "ads", "finance"], "ads": {"supportedTypes": ["multimedia", "product", "tourActivity", "propertyPromotion", "text"]}}))
-                     if self.model == "Smart":
-                         mode_value = "smart"
-                     elif "Think" in self.model:
-                         mode_value = "reasoning"
-                     else:
-                         mode_value = "chat"
-                     wss.send(json.dumps({
-                         "event": "send",
-                         "conversationId": conversation_id,
-                         "content": [*uploaded_images, {
-                             "type": "text",
-                             "text": conversation_prompt,
-                         }],
-                         "mode": mode_value,
-                         "model": self.model
-                     }).encode(), CurlWsFlag.TEXT)
-
-                     # Event-driven response loop
-                     state = {"is_started": False, "image_prompt": None, "done": False, "streaming_text": ""}
-                     last_msg = None
-                     try:
-                         while not state["done"]:
-                             try:
-                                 msg = wss.recv()[0]
-                                 msg = json.loads(msg)
-                             except Exception:
-                                 break
-                             last_msg = msg
-                             result = handle_event(msg, state)
-                             if result is not None:
-                                 yield result
-                         if not state["is_started"]:
-                             raise exceptions.FailedToGenerateResponseError(f"Invalid response: {last_msg}")
-                         self.conversation.update_chat_history(prompt, state["streaming_text"])
-                         self.last_response = {"text": state["streaming_text"]}
-                     finally:
-                         wss.close()
-             except Exception as e:
-                 raise exceptions.FailedToGenerateResponseError(f"Error: {str(e)}")
-
-         def for_non_stream():
-             streaming_text = ""
-             for response in for_stream():
-                 if isinstance(response, dict) and "text" in response:
-                     streaming_text += response["text"]
-             self.last_response = {"text": streaming_text}
-             return self.last_response
-
-         return for_stream() if stream else for_non_stream()
-
-     def chat(
-         self,
-         prompt: str,
-         stream: bool = True,
-         optimizer: str = None,
-         conversationally: bool = False,
-         images = None,
-         api_key: str = None,
-         **kwargs
-     ) -> Union[str, Generator]:
-         def for_stream():
-             for response in self.ask(prompt, True, optimizer=optimizer,
-                                      conversationally=conversationally,
-                                      images=images, api_key=api_key, **kwargs):
-                 if isinstance(response, dict):
-                     if "text" in response:
-                         yield response["text"]
-                     elif "type" in response:
-                         if response["type"] == "image":
-                             yield f"\n![Image]({response['url']})\n"
-                         elif response["type"] == "suggested_followups":
-                             yield "\nSuggested follow-up questions:\n"
-                             for suggestion in response["suggestions"]:
-                                 yield f"- {suggestion}\n"
-
-         def for_non_stream():
-             response = self.ask(prompt, False, optimizer=optimizer,
-                                 conversationally=conversationally,
-                                 images=images, api_key=api_key, **kwargs)
-             return self.get_message(response)
-
-         return for_stream() if stream else for_non_stream()
-
-     def get_message(self, response: dict) -> str:
-         assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response.get("text", "")
-
-
- async def get_access_token_and_cookies(url: str, proxy: str = None, target: str = "ChatAI"):
-     browser, stop_browser = await get_nodriver(proxy=proxy, user_data_dir="copilot")
-     try:
-         page = await browser.get(url)
-         access_token = None
-         while access_token is None:
-             access_token = await page.evaluate("""
-                 (() => {
-                     for (var i = 0; i < localStorage.length; i++) {
-                         try {
-                             item = JSON.parse(localStorage.getItem(localStorage.key(i)));
-                             if (item.credentialType == "AccessToken"
-                                 && item.expiresOn > Math.floor(Date.now() / 1000)
-                                 && item.target.includes("target")) {
-                                 return item.secret;
-                             }
-                         } catch(e) {}
-                     }
-                 })()
-             """.replace('"target"', json.dumps(target)))
-             if access_token is None:
-                 await asyncio.sleep(1)
-         cookies = {}
-         for c in await page.send(nodriver.cdp.network.get_cookies([url])):
-             cookies[c.name] = c.value
-         await page.close()
-         return access_token, cookies
-     finally:
-         stop_browser()
-
-
- def readHAR(url: str):
-     api_key = None
-     cookies = None
-     har_files = []
-     # Look for HAR files in common locations
-     har_paths = [
-         os.path.join(os.path.expanduser("~"), "Downloads"),
-         os.path.join(os.path.expanduser("~"), "Desktop")
-     ]
-     for path in har_paths:
-         if os.path.exists(path):
-             for file in os.listdir(path):
-                 if file.endswith(".har"):
-                     har_files.append(os.path.join(path, file))
-
-     for path in har_files:
-         with open(path, 'rb') as file:
-             try:
-                 harFile = json.loads(file.read())
-             except json.JSONDecodeError:
-                 # Error: not a HAR file!
-                 continue
-             for v in harFile['log']['entries']:
-                 if v['request']['url'].startswith(url):
-                     v_headers = {h['name'].lower(): h['value'] for h in v['request']['headers']}
-                     if "authorization" in v_headers:
-                         api_key = v_headers["authorization"].split(maxsplit=1).pop()
-                     if v['request']['cookies']:
-                         cookies = {c['name']: c['value'] for c in v['request']['cookies']}
-     if api_key is None:
-         raise NoValidHarFileError("No access token found in .har files")
-
-     return api_key, cookies
-
-
- # def get_clarity() -> bytes:
- #     body = base64.b64decode("H4sIAAAAAAAAA23RwU7DMAwG4HfJ2aqS2E5ibjxH1cMOnQYqYZvUTQPx7vyJRGGAemj01XWcP+9udg+j80MetDhSyrEISc5GrqrtZnmaTydHbrdUnSsWYT2u+8Obo0Ce/IQvaDBmjkwhUlKKIRNHmQgosqEArWPRDQMx90rxeUMPzB1j+UJvwNIxhTvsPcXyX1T+rizE4juK3mEEhpAUg/JvzW1/+U/tB1LATmhqotoiweMea50PLy2vui4LOY3XfD1dwnkor5fn/e18XBFgm6fHjSzZmCyV7d3aRByAEYextaTHEH3i5pgKGVP/s+DScE5PuLKIpW6FnCi1gY3Rbpqmj0/DI/+L7QEAAA==")
- #     return body
-
-
- async def get_nodriver(proxy=None, user_data_dir=None):
-     browser = await nodriver.Browser(
-         headless=True,
-         proxy=proxy,
-         user_data_dir=user_data_dir
-     )
-     return browser, lambda: browser.close()
-
-
- if __name__ == "__main__":
-     from rich import print
-     ai = Copilot(timeout=900, model="gpt-5")
-     response = ai.chat(input("> "), stream=True)
-     for chunk in response:
-         print(chunk, end="", flush=True)
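
The removed Copilot provider sourced its bearer token from a local .har export (readHAR) or from a nodriver-controlled browser session, and also accepted a token directly through the api_key parameter. A hypothetical usage sketch for webscout 8.3.6 and earlier, with the token value left as a placeholder:

```python
# Hypothetical usage for webscout <= 8.3.6, where Provider/copilot.py still shipped.
# Passing api_key skips the HAR/nodriver token lookup; "<bearer-token>" is a placeholder.
from webscout.Provider.copilot import Copilot

bot = Copilot(model="gpt-5", timeout=900)
for chunk in bot.chat("Summarize this release in one sentence.",
                      stream=True, api_key="<bearer-token>"):
    print(chunk, end="", flush=True)
```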