webscout-5.4-py3-none-any.whl → webscout-5.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/Agents/Onlinesearcher.py +3 -3
- webscout/Agents/__init__.py +0 -1
- webscout/Agents/functioncall.py +3 -3
- webscout/Provider/Bing.py +243 -0
- webscout/Provider/Chatify.py +1 -1
- webscout/Provider/Cloudflare.py +1 -1
- webscout/Provider/DARKAI.py +1 -1
- webscout/Provider/DiscordRocks.py +109 -246
- webscout/Provider/Farfalle.py +1 -1
- webscout/Provider/Free2GPT.py +234 -0
- webscout/{Agents/ai.py → Provider/GPTWeb.py} +40 -33
- webscout/Provider/Llama3.py +65 -62
- webscout/Provider/OLLAMA.py +1 -1
- webscout/Provider/PizzaGPT.py +1 -1
- webscout/Provider/RUBIKSAI.py +13 -3
- webscout/Provider/Reka.py +0 -1
- webscout/Provider/TTI/Nexra.py +120 -0
- webscout/Provider/TTI/__init__.py +4 -1
- webscout/Provider/TTI/aiforce.py +137 -0
- webscout/Provider/TTI/blackboximage.py +153 -0
- webscout/Provider/TTI/deepinfra.py +2 -2
- webscout/Provider/TeachAnything.py +1 -1
- webscout/Provider/Youchat.py +1 -1
- webscout/Provider/__init__.py +11 -6
- webscout/Provider/{NetFly.py → aigames.py} +76 -79
- webscout/Provider/cleeai.py +1 -1
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/felo_search.py +1 -1
- webscout/Provider/genspark.py +1 -1
- webscout/Provider/julius.py +7 -1
- webscout/Provider/lepton.py +1 -1
- webscout/Provider/meta.py +1 -1
- webscout/Provider/turboseek.py +1 -1
- webscout/Provider/upstage.py +230 -0
- webscout/Provider/x0gpt.py +1 -1
- webscout/Provider/xdash.py +1 -1
- webscout/Provider/yep.py +2 -2
- webscout/tempid.py +46 -2
- webscout/version.py +1 -1
- webscout/webai.py +1 -1
- webscout/webscout_search_async.py +9 -9
- {webscout-5.4.dist-info → webscout-5.6.dist-info}/METADATA +7 -30
- {webscout-5.4.dist-info → webscout-5.6.dist-info}/RECORD +47 -42
- webscout/Provider/ThinkAnyAI.py +0 -219
- {webscout-5.4.dist-info → webscout-5.6.dist-info}/LICENSE.md +0 -0
- {webscout-5.4.dist-info → webscout-5.6.dist-info}/WHEEL +0 -0
- {webscout-5.4.dist-info → webscout-5.6.dist-info}/entry_points.txt +0 -0
- {webscout-5.4.dist-info → webscout-5.6.dist-info}/top_level.txt +0 -0
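In practical terms, 5.6 adds chat providers (Bing, Free2GPT, GPTWeb, Upstage, and AIGameIO, renamed from NetFly), three text-to-image providers (Nexra, aiforce, blackboximage), an onlinesim.io helper in tempid.py, and removes ThinkAnyAI. A minimal post-upgrade smoke test might look like the sketch below; the import paths and class names are inferred from the diffs that follow, not from webscout's documentation.

# Sketch only: module paths and class names taken from the diffs below.
from webscout.Provider.upstage import Upstage    # new file in 5.6
from webscout.Provider.aigames import AIGameIO   # NetFly.py renamed to aigames.py

print(Upstage().chat("ping"))    # non-streaming chat returns the reply as a str
print(AIGameIO().chat("ping"))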
webscout/Provider/{NetFly.py → aigames.py} RENAMED

@@ -1,24 +1,17 @@
 import requests
-
-from random import randint
-
+import uuid
 import json
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
-from typing import Any, AsyncGenerator, Dict
-
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
 
-class NetFly(Provider):
+class AIGameIO(Provider):
     """
-    A class to interact with the
+    A class to interact with the AI-Game.io API.
     """
 
-    AVAILABLE_MODELS = ["gpt-3.5-turbo"]
-
     def __init__(
         self,
         is_conversation: bool = True,
@@ -30,11 +23,10 @@ class NetFly(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-
-        system_prompt: str = "You are a helpful and friendly AI assistant.",
+        system_prompt: str = "You are a Helpful ai"
     ):
         """
-        Initializes the
+        Initializes the AI-Game.io API with given parameters.
 
         Args:
             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
@@ -46,36 +38,26 @@ class NetFly(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-
-
+            system_prompt (str, optional): System prompt for AI-Game.io.
+                Defaults to "You are a Helpful ai".
         """
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Available model is: {self.AVAILABLE_MODELS[0]}")
-
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.api_endpoint =
+        self.api_endpoint = 'https://stream-chat-blmeirpipa-uc.a.run.app/streamChat'
         self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
-        self.model = model
         self.system_prompt = system_prompt
         self.headers = {
-
-
-
-
-
-
-
-
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+            'authority': 'stream-chat-blmeirpipa-uc.a.run.app',
+            'method': 'POST',
+            'path': '/streamChat',
+            'accept': 'text/event-stream',
+            'content-type': 'application/json',
+            'origin': 'https://www.ai-game.io',
+            'priority': 'u=1, i',
+            'referer': 'https://www.ai-game.io/',
         }
 
         self.__available_optimizers = (
@@ -105,6 +87,22 @@ class NetFly(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "text" : "How may I assist you today?"
+        }
+        ```
+        """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -115,58 +113,46 @@ class NetFly(Provider):
                 raise Exception(
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
-
+
         payload = {
-            "
-            {
-
-
-
-
-
-
-
-
+            "history": [
+                {
+                    "role": "system",
+                    "content": self.system_prompt
+                },
+                {
+                    "role": "user",
+                    "content": conversation_prompt
+                }
+            ]
         }
-
         def for_stream():
             response = self.session.post(
-                self.api_endpoint,
+                self.api_endpoint, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout
             )
-
             if not response.ok:
-                raise
-                    f"Failed to generate response - ({response.status_code}, {response.reason})"
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
 
-            full_response =
+            full_response = ''
             for line in response.iter_lines(decode_unicode=True):
-                if line:
-
-
-                    if
-
-
-
-
-                            full_response += content
-                            yield content if raw else dict(text=content)
-                        except json.decoder.JSONDecodeError:
-                            continue
-
+                if line.startswith("data: "):
+                    try:
+                        event_data = json.loads(line[6:])
+                        if event_data['event'] == 'text-chunk':
+                            full_response += event_data['data']['text']
+                            yield event_data['data']['text'] if raw else dict(text=full_response)
+                    except json.JSONDecodeError:
+                        pass
             self.last_response.update(dict(text=full_response))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
-
         def for_non_stream():
-
-
-
-                    full_response += chunk['text']
-                else:
-                    full_response += chunk
-            return dict(text=full_response)
+            for _ in for_stream():
+                pass
+            return self.last_response
 
         return for_stream() if stream else for_non_stream()
 
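The rewritten `for_stream` above replaces per-line JSON decoding with server-sent-event parsing: only lines prefixed `data: ` are decoded, and only events tagged `text-chunk` contribute text. A standalone sketch of the same rule, exercised against a hypothetical sample event:

import json
from typing import Optional

def parse_sse_line(line: str) -> Optional[str]:
    """Return the text carried by a 'text-chunk' SSE event, else None."""
    if not line.startswith("data: "):
        return None
    try:
        event = json.loads(line[6:])  # strip the "data: " prefix
    except json.JSONDecodeError:
        return None  # malformed payloads are skipped, as in the provider
    if event.get("event") == "text-chunk":
        return event["data"]["text"]
    return None

# Hypothetical event shaped like the ones the new code consumes:
assert parse_sse_line('data: {"event": "text-chunk", "data": {"text": "Hi"}}') == "Hi"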
@@ -177,6 +163,16 @@ class NetFly(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
         def for_stream():
             for response in self.ask(
                 prompt, True, optimizer=optimizer, conversationally=conversationally
@@ -207,10 +203,11 @@ class NetFly(Provider):
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 
-
+
+if __name__ == "__main__":
     from rich import print
-
-
+
+    ai = AIGameIO()
+    response = ai.chat("hi")
     for chunk in response:
-        print(chunk, end="", flush=True)
-    print() # Add a newline at the end
+        print(chunk, end="", flush=True)
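A usage sketch for the renamed provider, mirroring the bundled demo above (with the default `stream=False`, `chat` returns the complete reply as a string):

from webscout.Provider.aigames import AIGameIO  # path per the rename in this diff

ai = AIGameIO(system_prompt="You are a Helpful ai")
print(ai.chat("What is the capital of France?"))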
webscout/Provider/cleeai.py CHANGED
webscout/Provider/elmo.py CHANGED
webscout/Provider/felo_search.py CHANGED
webscout/Provider/genspark.py CHANGED
webscout/Provider/julius.py CHANGED
@@ -19,6 +19,12 @@ class Julius(Provider):
         "Command R",
         "Gemini Flash",
         "Gemini 1.5",
+        "Claude Sonnet",
+        "Claude Opus",
+        "Claude Haiku",
+        "GPT-4",
+        "GPT-4o mini",
+        "Command R+",
     ]
     def __init__(
         self,
@@ -241,6 +247,6 @@ class Julius(Provider):
 if __name__ == '__main__':
     from rich import print
     ai = Julius()
-    response = ai.chat(
+    response = ai.chat("hi")
     for chunk in response:
         print(chunk, end="", flush=True)
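A hedged sketch of selecting one of the six models added above; it assumes `Julius.__init__` takes a `model` keyword validated against `AVAILABLE_MODELS` and that `chat` shares the `stream` flag used by the other providers in this diff (the constructor body is not shown here):

from webscout.Provider.julius import Julius

ai = Julius(model="Claude Sonnet")  # hypothetical keyword; one of the 5.6 additions
for chunk in ai.chat("hi", stream=True):
    print(chunk, end="", flush=True)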
webscout/Provider/lepton.py CHANGED
webscout/Provider/meta.py CHANGED
webscout/Provider/turboseek.py CHANGED
webscout/Provider/upstage.py ADDED

@@ -0,0 +1,230 @@
+import requests
+import json
+from typing import Any, Dict, Optional
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+class Upstage(Provider):
+    """
+    A class to interact with the Upstage API.
+    """
+
+    AVAILABLE_MODELS = [
+        "upstage/solar-1-mini-chat",
+        "upstage/solar-1-mini-chat-ja",
+        "solar-pro"
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "upstage/solar-1-mini-chat",
+    ) -> None:
+        """
+        Initializes the Upstage API with given parameters.
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion.
+                Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts.
+                Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): AI model to use. Defaults to "upstage/solar-1-mini-chat".
+                Available models: "upstage/solar-1-mini-chat",
+                "upstage/solar-1-mini-chat-ja",
+                "solar-pro"
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://ap-northeast-2.apistage.ai/v1/web/demo/chat/completions"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.headers = {
+            "Content-Type": "application/json",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
+            "Origin": "https://console.upstage.ai",
+            "Referer": "https://console.upstage.ai/"
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Sends a prompt to the Upstage API and returns the response.
+
+        Args:
+            prompt: The text prompt to generate text from.
+            stream (bool, optional): Whether to stream the response. Defaults to False.
+            raw (bool, optional): Whether to return the raw response. Defaults to False.
+            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+        Returns:
+            The response from the API.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "stream": True,
+            "messages": [
+                {
+                    "role": "user",
+                    "content": conversation_prompt
+                }
+            ],
+            "model": self.model
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
+            )
+
+            if not response.ok:
+                # If 'solar-pro' fails, try mini-chat model
+                if self.model == "solar-pro":
+                    print("solar-pro failed. Trying 'upstage/solar-1-mini-chat'...")
+                    self.model = "upstage/solar-1-mini-chat"
+                    return self.ask(prompt, stream, raw, optimizer, conversationally)  # Retry with mini-chat
+
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason})"
+                )
+
+            streaming_response = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    if line.startswith("data: "):
+                        data = line[6:]  # Remove 'data: ' prefix
+                        if data != "[DONE]":
+                            try:
+                                json_data = json.loads(data)
+                                content = json_data['choices'][0]['delta'].get('content', '')
+                                if content:
+                                    streaming_response += content
+                                    yield content if raw else dict(text=streaming_response)
+                            except json.JSONDecodeError:
+                                print(f"Error decoding JSON: {data}")
+
+
+            self.last_response.update(dict(text=streaming_response))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+if __name__ == '__main__':
+    from rich import print
+    ai = Upstage()
+    response = ai.chat("hi")
+    for chunk in response:
+        print(chunk, end="", flush=True)
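Note the fallback built into `ask`: when a `solar-pro` request fails, the provider rewrites `self.model` to `upstage/solar-1-mini-chat` and retries once. Also note that with `raw=False` each streamed yield carries the accumulated text so far, so a caller printing deltas must slice, as in this sketch:

from webscout.Provider.upstage import Upstage

ai = Upstage(model="solar-pro")  # silently falls back to solar-1-mini-chat on failure
printed = ""
for text in ai.chat("Summarize server-sent events in one sentence.", stream=True):
    print(text[len(printed):], end="", flush=True)  # emit only the newly arrived part
    printed = text
print()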
webscout/Provider/x0gpt.py CHANGED
webscout/Provider/xdash.py CHANGED
webscout/Provider/yep.py CHANGED
@@ -467,7 +467,7 @@ if __name__ == "__main__":
        else:
            return "No results found for your query."
 
-    ai = YEPCHAT(Tools=
+    ai = YEPCHAT(Tools=False)
 
     ai.tool_registry.register_tool("get_current_time", get_current_time, "Gets the current time.")
     ai.tool_registry.register_tool(
@@ -495,6 +495,6 @@ if __name__ == "__main__":
        },
    )
 
-    response = ai.chat(
+    response = ai.chat("hi")
    for chunk in response:
        print(chunk, end="", flush=True)
webscout/tempid.py CHANGED
@@ -3,7 +3,8 @@ from dataclasses import dataclass
 from bs4 import BeautifulSoup
 import tls_client
 import random
-
+from typing import ClassVar, NoReturn, List, Dict, Any
+import requests
 
 @dataclass
 class DomainModel:
@@ -148,7 +149,50 @@ class TemporaryPhoneNumber:
            message = {"content": None, "frm": "", "time": ""}
 
        return messages
-
+
+class VNEngine:
+    def __init__(self) -> NoReturn:
+        self.lang: str = "?lang=en"
+        self.base: str = "https://onlinesim.io/"
+        self.endpoint: str = "api/v1/free_numbers_content/"
+        self.country_url: str = f"{self.base}{self.endpoint}countries"
+
+    def get_online_countries(self) -> List[Dict[str, str]]:
+        response: Any = requests.get(url=self.country_url).json()
+        if response["response"] == "1":
+            all_countries: List[Dict[str, str]] = response["counties"]
+            online_countries: List[Dict[str, str]] = list(
+                filter(lambda x: x["online"] == True, all_countries)
+            )
+            return online_countries
+        return []
+
+    def get_country_numbers(self, country: str) -> List[Dict[str, str]]:
+        numbers_url: str = f"{self.country_url}/{country}{self.lang}"
+        response: Any = requests.get(url=numbers_url).json()
+        if response["response"] == "1":
+            numbers: List[Dict[str, str]] = list(
+                map(lambda x: {"data_humans": x["data_humans"], "full_number": x["full_number"]}, response["numbers"])
+            )
+            return numbers
+        return []
+
+    def get_number_inbox(self, country: str, number: str) -> Dict[str, str]:
+        number_detail_url: str = f"{self.country_url}/{country}/{number}{self.lang}"
+        response: Any = requests.get(url=number_detail_url).json()
+        if response["response"] != "1" or not response["online"]:
+            print(f"Error: Unable to retrieve inbox messages for {country} - {number}")
+            return {}
+
+        messages: List[Dict[str, str]] = []
+        for msg_data in response["messages"]["data"]:
+            try:
+                msg = {"data_humans": msg_data["data_humans"], "text": msg_data["text"]}
+                messages.append(msg)
+            except KeyError as e:
+                print(f"Warning: Missing key '{str(e)}' in message data")
+
+        return {"messages": messages}
 
 class sms_message:
    def __init__(self, content, frm, time):
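A usage sketch chaining the three `VNEngine` calls in the order the endpoints expect. Live access to onlinesim.io is assumed, and the `country` key on each country record is an assumption (the code above only reads the `online` flag):

from webscout.tempid import VNEngine

engine = VNEngine()
countries = engine.get_online_countries()
if countries:
    country = countries[0]["country"]  # hypothetical key; adjust to the real payload
    numbers = engine.get_country_numbers(country)
    if numbers:
        number = numbers[0]["full_number"].lstrip("+")
        inbox = engine.get_number_inbox(country, number)
        for msg in inbox.get("messages", []):
            print(msg["data_humans"], "-", msg["text"])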
webscout/version.py CHANGED

@@ -1,2 +1,2 @@
-__version__ = "5.4"
+__version__ = "5.6"
 __prog__ = "webscout"
|