webscout 4.6__py3-none-any.whl → 4.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. See the advisory on the package registry for more details.
- webscout/Agents/functioncall.py +97 -37
- webscout/Bard.py +365 -0
- webscout/Local/_version.py +1 -1
- webscout/Provider/Andi.py +7 -1
- webscout/Provider/BasedGPT.py +11 -5
- webscout/Provider/Berlin4h.py +11 -5
- webscout/Provider/Blackboxai.py +10 -4
- webscout/Provider/Cohere.py +11 -5
- webscout/Provider/DARKAI.py +25 -7
- webscout/Provider/Deepinfra.py +2 -1
- webscout/Provider/Deepseek.py +25 -9
- webscout/Provider/DiscordRocks.py +389 -0
- webscout/Provider/{ChatGPTUK.py → Farfalle.py} +80 -67
- webscout/Provider/Gemini.py +1 -1
- webscout/Provider/Groq.py +244 -110
- webscout/Provider/Llama.py +13 -5
- webscout/Provider/Llama3.py +15 -2
- webscout/Provider/OLLAMA.py +8 -7
- webscout/Provider/Perplexity.py +422 -52
- webscout/Provider/Phind.py +6 -5
- webscout/Provider/PizzaGPT.py +7 -1
- webscout/Provider/__init__.py +15 -31
- webscout/Provider/ai4chat.py +193 -0
- webscout/Provider/koala.py +11 -5
- webscout/Provider/{VTLchat.py → liaobots.py} +120 -104
- webscout/Provider/meta.py +779 -0
- webscout/exceptions.py +6 -0
- webscout/version.py +1 -1
- webscout/webai.py +2 -64
- webscout/webscout_search.py +1 -1
- {webscout-4.6.dist-info → webscout-4.8.dist-info}/METADATA +254 -297
- {webscout-4.6.dist-info → webscout-4.8.dist-info}/RECORD +36 -40
- webscout/Provider/FreeGemini.py +0 -169
- webscout/Provider/Geminiflash.py +0 -152
- webscout/Provider/Geminipro.py +0 -152
- webscout/Provider/Leo.py +0 -469
- webscout/Provider/OpenGPT.py +0 -867
- webscout/Provider/Xjai.py +0 -230
- webscout/Provider/Yepchat.py +0 -478
- webscout/Provider/Youchat.py +0 -225
- {webscout-4.6.dist-info → webscout-4.8.dist-info}/LICENSE.md +0 -0
- {webscout-4.6.dist-info → webscout-4.8.dist-info}/WHEEL +0 -0
- {webscout-4.6.dist-info → webscout-4.8.dist-info}/entry_points.txt +0 -0
- {webscout-4.6.dist-info → webscout-4.8.dist-info}/top_level.txt +0 -0
webscout/Agents/functioncall.py
CHANGED
|
@@ -3,14 +3,24 @@ import logging
|
|
|
3
3
|
from webscout import DeepInfra, WEBS
|
|
4
4
|
|
|
5
5
|
class FunctionCallingAgent:
|
|
6
|
-
def __init__(self, model: str = "Qwen/Qwen2-72B-Instruct",
|
|
7
|
-
|
|
6
|
+
def __init__(self, model: str = "Qwen/Qwen2-72B-Instruct",
|
|
7
|
+
system_prompt: str = 'You are a helpful assistant that will always answer what the user wants',
|
|
8
|
+
tools: list = None):
|
|
9
|
+
"""
|
|
10
|
+
Initialize the FunctionCallingAgent with the model, system prompt, and tools.
|
|
11
|
+
|
|
12
|
+
Args:
|
|
13
|
+
model (str): The model to use for deepinfra chat.
|
|
14
|
+
system_prompt (str): The system prompt to initialize the model.
|
|
15
|
+
tools (list): A list of tools the agent can use.
|
|
16
|
+
"""
|
|
17
|
+
self.deepinfra = DeepInfra(model=model, system_prompt=system_prompt, timeout=300)
|
|
8
18
|
self.tools = tools if tools is not None else []
|
|
9
|
-
# logging.basicConfig(level=logging.INFO)
|
|
10
|
-
# self.webs = WEBS() # Initialize a WEBS object for web search
|
|
11
19
|
|
|
12
|
-
|
|
13
|
-
|
|
20
|
+
|
|
21
|
+
def function_call_handler(self, message_text: str) -> dict:
|
|
22
|
+
"""
|
|
23
|
+
Handles function calls based on the provided message text.
|
|
14
24
|
|
|
15
25
|
Args:
|
|
16
26
|
message_text (str): The input message text from the user.
|
|
@@ -18,31 +28,72 @@ class FunctionCallingAgent:
|
|
|
18
28
|
Returns:
|
|
19
29
|
dict: The extracted function call and arguments.
|
|
20
30
|
"""
|
|
21
|
-
system_message =
|
|
22
|
-
|
|
31
|
+
system_message = self._generate_system_message(message_text)
|
|
23
32
|
response = self.deepinfra.chat(system_message)
|
|
24
33
|
# logging.info(f"Raw response: {response}")
|
|
25
34
|
|
|
35
|
+
return self._parse_function_call(response)
|
|
36
|
+
|
|
37
|
+
def _generate_system_message(self, user_message: str) -> str:
|
|
38
|
+
"""
|
|
39
|
+
Generates a system message incorporating the user message and available tools.
|
|
40
|
+
|
|
41
|
+
Args:
|
|
42
|
+
user_message (str): The input message from the user.
|
|
43
|
+
|
|
44
|
+
Returns:
|
|
45
|
+
str: The formatted system message.
|
|
46
|
+
"""
|
|
47
|
+
tools_description = '\n'.join([f"{tool['function']['name']}: {tool['function'].get('description', '')}" for tool in self.tools])
|
|
48
|
+
return (
|
|
49
|
+
f"[SYSTEM] You are a helpful and capable AI assistant. "
|
|
50
|
+
"Your goal is to understand the user's request and provide accurate and relevant information. "
|
|
51
|
+
"You have access to the following tools:\n\n"
|
|
52
|
+
f"{tools_description}\n\n"
|
|
53
|
+
"To use a tool, please follow this format:\n\n"
|
|
54
|
+
"```json\n"
|
|
55
|
+
"{{ 'tool_name': 'tool_name', 'tool_input': {{ 'arg_1': 'value_1', 'arg_2': 'value_2', ... }} }}\n"
|
|
56
|
+
"```\n\n"
|
|
57
|
+
f"[USER] {user_message}"
|
|
58
|
+
)
|
|
59
|
+
|
|
60
|
+
def _parse_function_call(self, response: str) -> dict:
|
|
61
|
+
"""
|
|
62
|
+
Parses the response from the model to extract the function call.
|
|
63
|
+
|
|
64
|
+
Args:
|
|
65
|
+
response (str): The raw response from the model.
|
|
66
|
+
|
|
67
|
+
Returns:
|
|
68
|
+
dict: A dictionary containing the function name and arguments.
|
|
69
|
+
"""
|
|
26
70
|
try:
|
|
27
|
-
#
|
|
71
|
+
# Find the JSON-like part of the response
|
|
28
72
|
start_idx = response.find("{")
|
|
29
73
|
end_idx = response.rfind("}") + 1
|
|
74
|
+
|
|
30
75
|
if start_idx == -1 or end_idx == -1:
|
|
31
|
-
raise ValueError("JSON
|
|
76
|
+
raise ValueError("No valid JSON structure found in the response.")
|
|
32
77
|
|
|
33
78
|
response_json_str = response[start_idx:end_idx]
|
|
34
|
-
|
|
35
|
-
|
|
79
|
+
|
|
80
|
+
# Replace single quotes with double quotes and remove extra braces
|
|
81
|
+
response_json_str = response_json_str.replace("'", '"')
|
|
82
|
+
response_json_str = response_json_str.replace("{{", "{").replace("}}", "}")
|
|
83
|
+
|
|
84
|
+
# Remove any leading or trailing whitespace
|
|
36
85
|
response_json_str = response_json_str.strip()
|
|
37
|
-
|
|
86
|
+
|
|
87
|
+
# Attempt to load the JSON string
|
|
88
|
+
return json.loads(response_json_str)
|
|
89
|
+
|
|
38
90
|
except (ValueError, json.JSONDecodeError) as e:
|
|
39
|
-
|
|
91
|
+
logging.error(f"Error parsing function call: {e}")
|
|
40
92
|
return {"error": str(e)}
|
|
41
93
|
|
|
42
|
-
return response_data
|
|
43
|
-
|
|
44
94
|
def execute_function(self, function_call_data: dict) -> str:
|
|
45
|
-
"""
|
|
95
|
+
"""
|
|
96
|
+
Executes the specified function with the provided arguments.
|
|
46
97
|
|
|
47
98
|
Args:
|
|
48
99
|
function_call_data (dict): A dictionary containing the function name and arguments.
|
|
@@ -50,28 +101,37 @@ class FunctionCallingAgent:
|
|
|
50
101
|
Returns:
|
|
51
102
|
str: The result of the function execution.
|
|
52
103
|
"""
|
|
53
|
-
function_name = function_call_data.get("
|
|
54
|
-
arguments = function_call_data.get("
|
|
104
|
+
function_name = function_call_data.get("tool_name") # Use 'tool_name' instead of 'name'
|
|
105
|
+
arguments = function_call_data.get("tool_input", {}) # Use 'tool_input' instead of 'arguments'
|
|
55
106
|
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
arguments_dict = json.loads(arguments)
|
|
59
|
-
except json.JSONDecodeError:
|
|
60
|
-
# logging.error("Failed to parse arguments as JSON.")
|
|
107
|
+
if not isinstance(arguments, dict):
|
|
108
|
+
logging.error("Invalid arguments format.")
|
|
61
109
|
return "Invalid arguments format."
|
|
62
110
|
|
|
63
|
-
|
|
111
|
+
logging.info(f"Executing function: {function_name} with arguments: {arguments}")
|
|
112
|
+
|
|
113
|
+
if function_name == "web_search":
|
|
114
|
+
return self._handle_web_search(arguments)
|
|
115
|
+
else:
|
|
116
|
+
return f"Function '{function_name}' is not implemented."
|
|
117
|
+
|
|
118
|
+
# def _handle_web_search(self, arguments: dict) -> str:
|
|
119
|
+
# """
|
|
120
|
+
# Handles web search queries using the WEBS tool.
|
|
121
|
+
|
|
122
|
+
# Args:
|
|
123
|
+
# arguments (dict): A dictionary containing the query argument.
|
|
124
|
+
|
|
125
|
+
# Returns:
|
|
126
|
+
# str: The result of the web search.
|
|
127
|
+
# """
|
|
128
|
+
# query = arguments.get("query")
|
|
129
|
+
# if not query:
|
|
130
|
+
# return "Please provide a search query."
|
|
64
131
|
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
# search_results = self.webs.text(query)
|
|
69
|
-
# # You can process the search results here, e.g., extract URLs, summarize, etc.
|
|
70
|
-
# return f"Here's what I found:\n\n{search_results}"
|
|
71
|
-
# else:
|
|
72
|
-
# return "Please provide a search query."
|
|
73
|
-
# else:
|
|
74
|
-
# return f"Function '{function_name}' is not yet implemented."
|
|
132
|
+
# search_results = self.webs.text(query)
|
|
133
|
+
# # Additional processing of search results can be done here if needed.
|
|
134
|
+
# return f"Here's what I found:\n\n{search_results}"
|
|
75
135
|
|
|
76
136
|
# Example usage
|
|
77
137
|
if __name__ == "__main__":
|
|
@@ -117,10 +177,10 @@ if __name__ == "__main__":
|
|
|
117
177
|
]
|
|
118
178
|
|
|
119
179
|
agent = FunctionCallingAgent(tools=tools)
|
|
120
|
-
message = "
|
|
180
|
+
message = "websearch about helpingai-9b"
|
|
121
181
|
function_call_data = agent.function_call_handler(message)
|
|
122
182
|
print(f"Function Call Data: {function_call_data}")
|
|
123
183
|
|
|
124
184
|
if "error" not in function_call_data:
|
|
125
185
|
result = agent.execute_function(function_call_data)
|
|
126
|
-
|
|
186
|
+
print(f"Function Execution Result: {result}")
|
webscout/Bard.py
ADDED
|
@@ -0,0 +1,365 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
import asyncio
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
import random
|
|
6
|
+
import re
|
|
7
|
+
import string
|
|
8
|
+
import sys
|
|
9
|
+
from typing import Dict
|
|
10
|
+
from typing import List
|
|
11
|
+
|
|
12
|
+
import httpx
|
|
13
|
+
from prompt_toolkit import prompt
|
|
14
|
+
from prompt_toolkit import PromptSession
|
|
15
|
+
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
|
|
16
|
+
from prompt_toolkit.completion import WordCompleter
|
|
17
|
+
from prompt_toolkit.history import InMemoryHistory
|
|
18
|
+
from prompt_toolkit.key_binding import KeyBindings
|
|
19
|
+
from rich.console import Console
|
|
20
|
+
from rich.markdown import Markdown
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def __create_session() -> PromptSession:
    """Build a prompt session backed by an in-memory command history."""
    history = InMemoryHistory()
    return PromptSession(history=history)
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def __create_completer(commands: list, pattern_str: str = "$") -> WordCompleter:
    """Build a word completer over *commands*, triggered by *pattern_str*."""
    pattern = re.compile(pattern_str)
    return WordCompleter(words=commands, pattern=pattern)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def __get_input(
    prompt_sess: PromptSession = None,
    completer: WordCompleter = None,
    key_bindings: KeyBindings = None,
) -> str:
    """Read a multiline string from the user.

    When a PromptSession is supplied, prompt through it with completion,
    history-based auto-suggestions, and the given key bindings; otherwise
    fall back to a plain multiline prompt.
    """
    if prompt_sess:
        return prompt_sess.prompt(
            completer=completer,
            multiline=True,
            auto_suggest=AutoSuggestFromHistory(),
            key_bindings=key_bindings,
        )
    return prompt(multiline=True)
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class Chatbot:
    """
    Synchronous wrapper for the AsyncChatbot class.

    Owns an event loop and drives the async API with ``run_until_complete``
    so callers can stay fully synchronous.
    """

    def __init__(
        self,
        secure_1psid: str,
        secure_1psidts: str,
        proxy: dict = None,
        timeout: int = 20,
    ):
        # asyncio.get_event_loop() raises RuntimeError on Python 3.12+ (and
        # warns on 3.10+) when no loop exists in the current thread; fall
        # back to creating and installing a fresh loop in that case.
        try:
            self.loop = asyncio.get_event_loop()
        except RuntimeError:
            self.loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self.loop)
        self.async_chatbot = self.loop.run_until_complete(
            AsyncChatbot.create(secure_1psid, secure_1psidts, proxy, timeout),
        )

    def save_conversation(self, file_path: str, conversation_name: str):
        """Persist the current conversation state under *conversation_name*."""
        return self.loop.run_until_complete(
            self.async_chatbot.save_conversation(file_path, conversation_name),
        )

    def load_conversations(self, file_path: str) -> List[Dict]:
        """Return all conversations stored in *file_path* (empty list if missing)."""
        return self.loop.run_until_complete(
            self.async_chatbot.load_conversations(file_path),
        )

    def load_conversation(self, file_path: str, conversation_name: str) -> bool:
        """Restore a saved conversation; return True when it was found."""
        return self.loop.run_until_complete(
            self.async_chatbot.load_conversation(file_path, conversation_name),
        )

    def ask(self, message: str) -> dict:
        """Send *message* to Gemini and return the parsed response dict."""
        return self.loop.run_until_complete(self.async_chatbot.ask(message))
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
class AsyncChatbot:
    """
    A class to interact with Google Gemini.

    Parameters
        secure_1psid: str
            The __Secure-1PSID cookie value.
        secure_1psidts: str
            The __Secure-1PSIDTS cookie value.
        proxy: dict
            Optional HTTP proxy configuration.
        timeout: int
            Request timeout in seconds.
    """

    __slots__ = [
        "headers",
        "_reqid",
        "SNlM0e",
        "conversation_id",
        "response_id",
        "choice_id",
        "proxy",
        "secure_1psidts",
        "secure_1psid",
        "session",
        "timeout",
    ]

    def __init__(
        self,
        secure_1psid: str,
        secure_1psidts: str,
        proxy: dict = None,
        timeout: int = 20,
    ):
        """Constructor

        Args:
            secure_1psid (str): __Secure-1PSID cookie value
            secure_1psidts (str): __Secure-1PSIDTS cookie value
            proxy (dict, optional): Http request proxy. Defaults to None.
            timeout (int, optional): http request timeout. Defaults to 20.
        """
        headers = {
            "Content-Type": "application/x-www-form-urlencoded;charset=utf-8",
            "Host": "gemini.google.com",
            "Origin": "https://gemini.google.com",
            "Referer": "https://gemini.google.com/",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            "X-Same-Domain": "1",
        }
        # Mimics the web client's request counter; a random 4-digit seed keeps
        # concurrent sessions from colliding. Incremented by 100000 per ask().
        self._reqid = int("".join(random.choices(string.digits, k=4)))
        self.proxy = proxy
        self.conversation_id = ""
        self.response_id = ""
        self.choice_id = ""
        self.secure_1psid = secure_1psid
        self.secure_1psidts = secure_1psidts
        self.session = httpx.AsyncClient(proxies=self.proxy)
        self.session.headers = headers
        self.session.cookies.set("__Secure-1PSID", secure_1psid)
        self.session.cookies.set("__Secure-1PSIDTS", secure_1psidts)
        self.timeout = timeout

    @classmethod
    async def create(
        cls,
        secure_1psid: str,
        secure_1psidts: str,
        proxy: dict = None,
        timeout: int = 20,
    ) -> "AsyncChatbot":
        """
        Async constructor: builds the instance and fetches the SNlM0e token
        (required in every POST) before returning it.
        """
        instance = cls(secure_1psid, secure_1psidts, proxy, timeout)
        instance.SNlM0e = await instance.__get_snlm0e()
        return instance

    async def save_conversation(self, file_path: str, conversation_name: str) -> None:
        """
        Saves conversation to the file
        :param file_path: file to save (json)
        :param conversation_name: any name of current conversation (unique one)
        :return: None
        """
        # Load conversations from file
        conversations = await self.load_conversations(file_path)

        # Update existing one
        conversation_exists = False
        for conversation in conversations:
            if conversation["conversation_name"] == conversation_name:
                conversation["_reqid"] = self._reqid
                conversation["conversation_id"] = self.conversation_id
                conversation["response_id"] = self.response_id
                conversation["choice_id"] = self.choice_id
                conversation["SNlM0e"] = self.SNlM0e
                conversation_exists = True

        # Create conversation object
        if not conversation_exists:
            conversation = {
                "conversation_name": conversation_name,
                "_reqid": self._reqid,
                "conversation_id": self.conversation_id,
                "response_id": self.response_id,
                "choice_id": self.choice_id,
                "SNlM0e": self.SNlM0e,
            }
            conversations.append(conversation)

        # Save to the file
        with open(file_path, "w", encoding="utf-8") as f:
            json.dump(conversations, f, indent=4)

    async def load_conversations(self, file_path: str) -> List[Dict]:
        """Return the conversation list stored in *file_path* ([] if absent)."""
        if not os.path.isfile(file_path):
            return []
        with open(file_path, encoding="utf-8") as f:
            return json.load(f)

    async def load_conversation(self, file_path: str, conversation_name: str) -> bool:
        """
        Loads a conversation from history file. Returns whether the conversation was found
        :param file_path: File with conversations (json)
        :param conversation_name: unique conversation name
        :return: True if the conversation was found
        """
        conversations = await self.load_conversations(file_path)
        for conversation in conversations:
            if conversation["conversation_name"] == conversation_name:
                self._reqid = conversation["_reqid"]
                self.conversation_id = conversation["conversation_id"]
                self.response_id = conversation["response_id"]
                self.choice_id = conversation["choice_id"]
                self.SNlM0e = conversation["SNlM0e"]
                return True
        return False

    async def __get_snlm0e(self):
        """Scrape the "SNlM0e" anti-CSRF token from the Gemini app page."""
        if (
            not (self.secure_1psid and self.secure_1psidts)
            or self.secure_1psid[:2] != "g."
        ):
            raise Exception(
                "Enter correct __Secure_1PSID and __Secure_1PSIDTS value. __Secure_1PSID value must start with a g dot (g.). ",
            )
        resp = await self.session.get(
            "https://gemini.google.com/app",
            timeout=10,
            follow_redirects=True,
        )
        if resp.status_code != 200:
            raise Exception(
                f"Response code not 200. Response Status is {resp.status_code}",
            )
        SNlM0e = re.search(r'"SNlM0e":"(.*?)"', resp.text)
        if not SNlM0e:
            raise Exception(
                "SNlM0e value not found in response. Check __Secure_1PSID value."
                "\nNOTE : The cookies expire after a short period; ensure you update them as frequent as possible."
                f" Failed with status {resp.status_code} - {resp.reason_phrase}",
            )
        return SNlM0e.group(1)

    async def ask(self, message: str) -> dict:
        """
        Send a message to Google Gemini and return the response.
        :param message: The message to send to Google Gemini.
        :return: A dict containing the response from Google Gemini.
        """
        # url params
        params = {
            "bl": "boq_assistant-bard-web-server_20230713.13_p0",
            "_reqid": str(self._reqid),
            "rt": "c",
        }

        # message arr -> data["f.req"]. Message is double json stringified
        message_struct = [
            [message],
            None,
            [self.conversation_id, self.response_id, self.choice_id],
        ]
        data = {
            "f.req": json.dumps([None, json.dumps(message_struct)]),
            "at": self.SNlM0e,
        }
        resp = await self.session.post(
            "https://gemini.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate",
            params=params,
            data=data,
            timeout=self.timeout,
        )
        # The batched-RPC payload embeds the chat data on the 4th line; this
        # layout is undocumented and may change server-side.
        chat_data = json.loads(resp.content.splitlines()[3])[0][2]
        if not chat_data:
            return {"content": f"Gemini encountered an error: {resp.content}."}
        json_chat_data = json.loads(chat_data)
        images = []
        # Bounds checks: answer candidates live at index 4, images at [4][0][4].
        # The previous guards (>= 3 and >= 4) still allowed IndexError when
        # those very indices were accessed.
        if len(json_chat_data) > 4 and json_chat_data[4]:
            if len(json_chat_data[4][0]) > 4 and json_chat_data[4][0][4]:
                for img in json_chat_data[4][0][4]:
                    images.append(img[0][0][0])
        results = {
            "content": json_chat_data[4][0][1][0],
            "conversation_id": json_chat_data[1][0],
            "response_id": json_chat_data[1][1],
            "factualityQueries": json_chat_data[3],
            "textQuery": json_chat_data[2][0] if json_chat_data[2] is not None else "",
            "choices": [{"id": i[0], "content": i[1]} for i in json_chat_data[4]],
            "images": images,
        }
        # Carry the returned identifiers forward so the next ask() continues
        # the same conversation.
        self.conversation_id = results["conversation_id"]
        self.response_id = results["response_id"]
        self.choice_id = results["choices"][0]["id"]
        self._reqid += 100000
        return results
|
|
308
|
+
|
|
309
|
+
|
|
310
|
+
if __name__ == "__main__":
    # sys is already imported at module level; no local re-import needed.
    console = Console()
    if os.getenv("Gemini_QUICK"):
        # Non-interactive mode: read cookies from the environment, answer the
        # question passed on the command line, and exit.
        Secure_1PSID = os.getenv("Gemini__Secure_1PSID")
        secure_1psidts = os.getenv("Gemini__secure_1psidts")
        if not (Secure_1PSID and secure_1psidts):
            print(
                "Gemini__Secure_1PSID or Gemini__secure_1psidts environment variable not set.",
            )
            sys.exit(1)
        chatbot = Chatbot(Secure_1PSID, secure_1psidts)
        # Join arguments into a single string
        MESSAGE = " ".join(sys.argv[1:])
        response = chatbot.ask(MESSAGE)
        console.print(Markdown(response["content"]))
        console.print(response["images"] if response.get("images") else "")
        sys.exit(0)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--session",
        help="__Secure-1PSID cookie",
        type=str,
        required=True,
    )
    parser.add_argument(
        "--session_ts",
        help="__secure_1psidts cookie.",
        type=str,
        required=True,
    )
    args = parser.parse_args()

    chatbot = Chatbot(args.session, args.session_ts)
    prompt_session = __create_session()
    completions = __create_completer(["!exit", "!reset"])

    try:
        while True:
            console.print("You:")
            user_prompt = __get_input(prompt_sess=prompt_session, completer=completions)
            console.print()
            if user_prompt == "!exit":
                break
            elif user_prompt == "!reset":
                # The conversation state lives on the wrapped AsyncChatbot;
                # setting attributes on the sync wrapper (as the code did
                # before) created dead attributes and reset nothing.
                chatbot.async_chatbot.conversation_id = ""
                chatbot.async_chatbot.response_id = ""
                chatbot.async_chatbot.choice_id = ""
                continue
            print("Google Gemini:")
            response = chatbot.ask(user_prompt)
            console.print(Markdown(response["content"]))
            console.print(response["images"] if response.get("images") else "")
            print()
    except KeyboardInterrupt:
        print("Exiting...")
|
webscout/Local/_version.py
CHANGED
webscout/Provider/Andi.py
CHANGED
|
@@ -272,4 +272,10 @@ class AndiSearch(Provider):
|
|
|
272
272
|
str: Message extracted
|
|
273
273
|
"""
|
|
274
274
|
assert isinstance(response, dict), "Response should be of dict data-type only"
|
|
275
|
-
return response["text"]
|
|
275
|
+
return response["text"]
|
|
276
|
+
if __name__ == '__main__':
|
|
277
|
+
from rich import print
|
|
278
|
+
ai = AndiSearch()
|
|
279
|
+
response = ai.chat("tell me about india")
|
|
280
|
+
for chunk in response:
|
|
281
|
+
print(chunk, end="", flush=True)
|
webscout/Provider/BasedGPT.py
CHANGED
|
@@ -19,10 +19,10 @@ import io
|
|
|
19
19
|
import re
|
|
20
20
|
import json
|
|
21
21
|
import yaml
|
|
22
|
-
from
|
|
23
|
-
from
|
|
24
|
-
from
|
|
25
|
-
from
|
|
22
|
+
from webscout.AIutel import Optimizers
|
|
23
|
+
from webscout.AIutel import Conversation
|
|
24
|
+
from webscout.AIutel import AwesomePrompts, sanitize_stream
|
|
25
|
+
from webscout.AIbase import Provider, AsyncProvider
|
|
26
26
|
from webscout import exceptions
|
|
27
27
|
from typing import Any, AsyncGenerator, Dict
|
|
28
28
|
import logging
|
|
@@ -225,4 +225,10 @@ class BasedGPT(Provider):
|
|
|
225
225
|
str: Message extracted
|
|
226
226
|
"""
|
|
227
227
|
assert isinstance(response, dict), "Response should be of dict data-type only"
|
|
228
|
-
return response["text"]
|
|
228
|
+
return response["text"]
|
|
229
|
+
if __name__ == '__main__':
|
|
230
|
+
from rich import print
|
|
231
|
+
ai = BasedGPT()
|
|
232
|
+
response = ai.chat("tell me about india")
|
|
233
|
+
for chunk in response:
|
|
234
|
+
print(chunk, end="", flush=True)
|
webscout/Provider/Berlin4h.py
CHANGED
|
@@ -2,10 +2,10 @@ import requests
|
|
|
2
2
|
import json
|
|
3
3
|
import uuid
|
|
4
4
|
from typing import Any, Dict, Optional
|
|
5
|
-
from
|
|
6
|
-
from
|
|
7
|
-
from
|
|
8
|
-
from
|
|
5
|
+
from webscout.AIutel import Optimizers
|
|
6
|
+
from webscout.AIutel import Conversation
|
|
7
|
+
from webscout.AIutel import AwesomePrompts, sanitize_stream
|
|
8
|
+
from webscout.AIbase import Provider, AsyncProvider
|
|
9
9
|
from webscout import exceptions
|
|
10
10
|
|
|
11
11
|
class Berlin4h(Provider):
|
|
@@ -208,4 +208,10 @@ class Berlin4h(Provider):
|
|
|
208
208
|
str: Message extracted
|
|
209
209
|
"""
|
|
210
210
|
assert isinstance(response, dict), "Response should be of dict data-type only"
|
|
211
|
-
return response["text"]
|
|
211
|
+
return response["text"]
|
|
212
|
+
if __name__ == '__main__':
|
|
213
|
+
from rich import print
|
|
214
|
+
ai = Berlin4h()
|
|
215
|
+
response = ai.chat("tell me about india")
|
|
216
|
+
for chunk in response:
|
|
217
|
+
print(chunk, end="", flush=True)
|
webscout/Provider/Blackboxai.py
CHANGED
|
@@ -19,10 +19,10 @@ import io
|
|
|
19
19
|
import re
|
|
20
20
|
import json
|
|
21
21
|
import yaml
|
|
22
|
-
from
|
|
23
|
-
from
|
|
24
|
-
from
|
|
25
|
-
from
|
|
22
|
+
from webscout.AIutel import Optimizers
|
|
23
|
+
from webscout.AIutel import Conversation
|
|
24
|
+
from webscout.AIutel import AwesomePrompts, sanitize_stream
|
|
25
|
+
from webscout.AIbase import Provider, AsyncProvider
|
|
26
26
|
from Helpingai_T2 import Perplexity
|
|
27
27
|
from webscout import exceptions
|
|
28
28
|
from typing import Any, AsyncGenerator, Dict
|
|
@@ -442,3 +442,9 @@ def clean_response(response_text: str) -> str:
|
|
|
442
442
|
# Remove any remaining special characters or markers
|
|
443
443
|
response_text = re.sub(r'\$~~~', '', response_text)
|
|
444
444
|
return response_text
|
|
445
|
+
if __name__ == '__main__':
|
|
446
|
+
from rich import print
|
|
447
|
+
ai = BLACKBOXAI()
|
|
448
|
+
response = ai.chat("tell me about india")
|
|
449
|
+
for chunk in response:
|
|
450
|
+
print(chunk, end="", flush=True)
|
webscout/Provider/Cohere.py
CHANGED
|
@@ -19,10 +19,10 @@ import io
|
|
|
19
19
|
import re
|
|
20
20
|
import json
|
|
21
21
|
import yaml
|
|
22
|
-
from
|
|
23
|
-
from
|
|
24
|
-
from
|
|
25
|
-
from
|
|
22
|
+
from webscout.AIutel import Optimizers
|
|
23
|
+
from webscout.AIutel import Conversation
|
|
24
|
+
from webscout.AIutel import AwesomePrompts, sanitize_stream
|
|
25
|
+
from webscout.AIbase import Provider, AsyncProvider
|
|
26
26
|
from Helpingai_T2 import Perplexity
|
|
27
27
|
from webscout import exceptions
|
|
28
28
|
from typing import Any, AsyncGenerator, Dict
|
|
@@ -220,4 +220,10 @@ class Cohere(Provider):
|
|
|
220
220
|
str: Message extracted
|
|
221
221
|
"""
|
|
222
222
|
assert isinstance(response, dict), "Response should be of dict data-type only"
|
|
223
|
-
return response["result"]["chatStreamEndEvent"]["response"]["text"]
|
|
223
|
+
return response["result"]["chatStreamEndEvent"]["response"]["text"]
|
|
224
|
+
if __name__ == '__main__':
|
|
225
|
+
from rich import print
|
|
226
|
+
ai = Cohere(api_key="")
|
|
227
|
+
response = ai.chat("tell me about india")
|
|
228
|
+
for chunk in response:
|
|
229
|
+
print(chunk, end="", flush=True)
|