webscout-7.8-py3-none-any.whl → webscout-8.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as potentially problematic.
- webscout/Bard.py +5 -25
- webscout/DWEBS.py +476 -476
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -103
- webscout/Extra/__init__.py +2 -0
- webscout/Extra/autocoder/__init__.py +1 -1
- webscout/Extra/autocoder/{rawdog.py → autocoder.py} +849 -849
- webscout/Extra/tempmail/__init__.py +26 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +156 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Provider/AISEARCH/__init__.py +5 -1
- webscout/Provider/AISEARCH/hika_search.py +194 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +320 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/AllenAI.py +255 -122
- webscout/Provider/DeepSeek.py +1 -2
- webscout/Provider/Deepinfra.py +296 -286
- webscout/Provider/ElectronHub.py +709 -716
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +28 -6
- webscout/Provider/Gemini.py +167 -165
- webscout/Provider/GithubChat.py +2 -1
- webscout/Provider/Groq.py +38 -24
- webscout/Provider/LambdaChat.py +2 -1
- webscout/Provider/Netwrck.py +3 -2
- webscout/Provider/OpenGPT.py +199 -0
- webscout/Provider/PI.py +39 -24
- webscout/Provider/TextPollinationsAI.py +232 -230
- webscout/Provider/Youchat.py +326 -296
- webscout/Provider/__init__.py +10 -4
- webscout/Provider/ai4chat.py +58 -56
- webscout/Provider/akashgpt.py +34 -22
- webscout/Provider/copilot.py +427 -427
- webscout/Provider/freeaichat.py +9 -2
- webscout/Provider/labyrinth.py +121 -20
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/scira_chat.py +271 -0
- webscout/Provider/typefully.py +280 -0
- webscout/Provider/uncovr.py +312 -299
- webscout/Provider/yep.py +64 -12
- webscout/__init__.py +38 -36
- webscout/cli.py +293 -293
- webscout/conversation.py +350 -17
- webscout/litprinter/__init__.py +59 -667
- webscout/optimizers.py +419 -419
- webscout/update_checker.py +14 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +1346 -1282
- webscout/webscout_search_async.py +877 -813
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/METADATA +44 -39
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/RECORD +63 -46
- webscout/Provider/DARKAI.py +0 -225
- webscout/Provider/EDITEE.py +0 -192
- webscout/litprinter/colors.py +0 -54
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/LICENSE.md +0 -0
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/WHEEL +0 -0
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/entry_points.txt +0 -0
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/top_level.txt +0 -0
webscout/Provider/scira_chat.py (new file)
@@ -0,0 +1,271 @@
+import requests
+import json
+import uuid
+import re
+from typing import Any, Dict, Optional, Union
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class SciraAI(Provider):
+    """
+    A class to interact with the Scira AI chat API.
+    """
+
+    AVAILABLE_MODELS = {
+        "scira-default": "Grok3",
+        "scira-grok-3-mini": "Grok3-mini",  # thinking model
+        "scira-vision": "Grok2-Vision",  # vision model
+        "scira-claude": "Sonnet-3.7",
+        "scira-optimus": "optimus",
+
+    }
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "scira-default",
+        chat_id: str = None,
+        user_id: str = None,
+        browser: str = "chrome"
+    ):
+        """Initializes the Scira AI API client.
+
+        Args:
+            is_conversation (bool): Whether to maintain conversation history.
+            max_tokens (int): Maximum number of tokens to generate.
+            timeout (int): Request timeout in seconds.
+            intro (str): Introduction text for the conversation.
+            filepath (str): Path to save conversation history.
+            update_file (bool): Whether to update the conversation history file.
+            proxies (dict): Proxy configuration for requests.
+            history_offset (int): Maximum history length in characters.
+            act (str): Persona for the AI to adopt.
+            model (str): Model to use, must be one of AVAILABLE_MODELS.
+            chat_id (str): Unique identifier for the chat session.
+            user_id (str): Unique identifier for the user.
+            browser (str): Browser to emulate in requests.
+
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.url = "https://scira.ai/api/search"
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+        # Use fingerprinting to create a consistent browser identity
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Use the fingerprint for headers
+        self.headers = {
+            "Accept": self.fingerprint["accept"],
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "Origin": "https://scira.ai",
+            "Referer": "https://scira.ai/",
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin"
+        }
+
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.chat_id = chat_id or str(uuid.uuid4())
+        self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"
+
+        # Always use chat mode (no web search)
+        self.search_mode = "chat"
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept": self.fingerprint["accept"],
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+        })
+
+        # Update session headers
+        for header, value in self.headers.items():
+            self.session.headers[header] = value
+
+        return self.fingerprint
+
+    def ask(
+        self,
+        prompt: str,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        # Prepare the request payload
+        payload = {
+            "id": self.chat_id,
+            "messages": [
+                {
+                    "role": "user",
+                    "content": conversation_prompt,
+                    "parts": [{"type": "text", "text": conversation_prompt}]
+                }
+            ],
+            "model": self.model,
+            "group": "chat",  # Always use chat mode (no web search)
+            "user_id": self.user_id,
+            "timezone": "Asia/Calcutta"
+        }
+
+        try:
+            response = self.session.post(self.url, json=payload, timeout=self.timeout)
+            if response.status_code != 200:
+                # Try to get response content for better error messages
+                try:
+                    error_content = response.text
+                except:
+                    error_content = "<could not read response content>"
+
+                if response.status_code in [403, 429]:
+                    print(f"Received status code {response.status_code}, refreshing identity...")
+                    self.refresh_identity()
+                    response = self.session.post(self.url, json=payload, timeout=self.timeout)
+                    if not response.ok:
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}"
+                        )
+                    print("Identity refreshed successfully.")
+                else:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Request failed with status code {response.status_code}. Response: {error_content}"
+                    )
+
+            full_response = ""
+            debug_lines = []
+
+            # Collect the first few lines for debugging
+            for i, line in enumerate(response.iter_lines()):
+                if line:
+                    try:
+                        line_str = line.decode('utf-8')
+                        debug_lines.append(line_str)
+
+                        # Format 2: 0:"content" (quoted format)
+                        match = re.search(r'0:"(.*?)"', line_str)
+                        if match:
+                            content = match.group(1)
+                            full_response += content
+                            continue
+
+                    except: pass
+            self.last_response = {"text": full_response}
+            self.conversation.update_chat_history(prompt, full_response)
+            return {"text": full_response}
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+    def chat(
+        self,
+        prompt: str,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        return self.get_message(
+            self.ask(
+                prompt, optimizer=optimizer, conversationally=conversationally
+            )
+        )
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+if __name__ == "__main__":
+    print("-" * 100)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 100)
+
+    test_prompt = "Say 'Hello' in one word"
+
+    # Test each model
+    for model in SciraAI.AVAILABLE_MODELS:
+        print(f"\rTesting {model}...", end="")
+
+        try:
+            test_ai = SciraAI(model=model, timeout=120)  # Increased timeout
+            response = test_ai.chat(test_prompt)
+
+            if response and len(response.strip()) > 0:
+                status = "✓"
+                # Clean and truncate response
+                clean_text = response.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            error_msg = str(e)
+            # Truncate very long error messages
+            if len(error_msg) > 100:
+                error_msg = error_msg[:97] + "..."
+            print(f"\r{model:<50} {'✗':<10} Error: {error_msg}")
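For reference, a minimal usage sketch of the new SciraAI provider added in this release. This is a sketch only: the prompt text is illustrative, and it assumes webscout 8.0 is installed and the scira.ai endpoint is reachable.

from webscout.Provider.scira_chat import SciraAI

# Any key from SciraAI.AVAILABLE_MODELS works; "scira-default" maps to Grok3.
ai = SciraAI(model="scira-default", timeout=60)
reply = ai.chat("What is the capital of France?")  # returns plain text via get_message()
print(reply)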
webscout/Provider/typefully.py (new file)
@@ -0,0 +1,280 @@
+from typing import Union, Any, Dict
+import requests
+import re
+from uuid import uuid4
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class TypefullyAI(Provider):
+    """
+    A class to interact with the Typefully AI API.
+
+    Attributes:
+        system_prompt (str): The system prompt to define the assistant's role.
+        model (str): The model identifier to use for completions.
+        output_length (int): Maximum length of the generated output.
+
+    Examples:
+        >>> from webscout.Provider.typefully import TypefullyAI
+        >>> ai = TypefullyAI()
+        >>> response = ai.chat("What's the weather today?")
+        >>> print(response)
+        'The weather today is sunny with a high of 75°F.'
+    """
+    AVAILABLE_MODELS = ["openai:gpt-4o-mini", "openai:gpt-4o", "anthropic:claude-3-5-haiku-20241022", "groq:llama-3.3-70b-versatile"]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You're a helpful assistant.",
+        model: str = "openai:gpt-4o-mini",
+    ):
+        """
+        Initializes the TypefullyAI API with given parameters.
+
+        Args:
+            is_conversation (bool): Whether the provider is in conversation mode.
+            max_tokens (int): Maximum number of tokens to sample.
+            timeout (int): Timeout for API requests.
+            intro (str): Introduction message for the conversation.
+            filepath (str): Filepath for storing conversation history.
+            update_file (bool): Whether to update the conversation history file.
+            proxies (dict): Proxies for the API requests.
+            history_offset (int): Offset for conversation history.
+            act (str): Act for the conversation.
+            system_prompt (str): The system prompt to define the assistant's role.
+            model (str): The model identifier to use.
+
+        Examples:
+            >>> ai = TypefullyAI(system_prompt="You are a friendly assistant.")
+            >>> print(ai.system_prompt)
+            'You are a friendly assistant.'
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://typefully.com/tools/ai/api/completion"
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt
+        self.model = model
+        self.output_length = max_tokens
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+
+        self.headers = {
+            "authority": "typefully.com",
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://typefully.com",
+            "referer": "https://typefully.com/tools/ai/chat-gpt-alternative",
+            "sec-ch-ua": '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "user-agent": self.agent.random()  # Use LitAgent to generate a random user agent
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Sends a prompt to the Typefully AI API and returns the response.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            raw (bool): Whether to return the raw response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            Dict[str, Any]: The API response.
+
+        Examples:
+            >>> ai = TypefullyAI()
+            >>> response = ai.ask("Tell me a joke!")
+            >>> print(response)
+            {'text': 'Why did the scarecrow win an award? Because he was outstanding in his field!'}
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "prompt": conversation_prompt,
+            "systemPrompt": self.system_prompt,
+            "modelIdentifier": self.model,
+            "outputLength": self.output_length
+        }
+
+        def for_stream():
+            response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout)
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            streaming_response = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    match = re.search(r'0:"(.*?)"', line)
+                    if match:
+                        content = match.group(1)
+                        streaming_response += content
+                        yield content if raw else dict(text=content)
+                    elif line.startswith('e:') or line.startswith('d:'):
+                        # End of response
+                        break
+            self.last_response.update(dict(text=streaming_response))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """
+        Generates a response from the Typefully AI API.
+
+        Args:
+            prompt (str): The prompt to send to the API.
+            stream (bool): Whether to stream the response.
+            optimizer (str): Optimizer to use for the prompt.
+            conversationally (bool): Whether to generate the prompt conversationally.
+
+        Returns:
+            str: The API response.
+
+        Examples:
+            >>> ai = TypefullyAI()
+            >>> response = ai.chat("What's the weather today?")
+            >>> print(response)
+            'The weather today is sunny with a high of 75°F.'
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts the message from the API response.
+
+        Args:
+            response (dict): The API response.
+
+        Returns:
+            str: The message content.
+
+        Examples:
+            >>> ai = TypefullyAI()
+            >>> response = ai.ask("Tell me a joke!")
+            >>> message = ai.get_message(response)
+            >>> print(message)
+            'Why did the scarecrow win an award? Because he was outstanding in his field!'
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        formatted_text = response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+        return formatted_text
+
+if __name__ == "__main__":
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    # Test all available models
+    working = 0
+    total = len(TypefullyAI.AVAILABLE_MODELS)
+
+    for model in TypefullyAI.AVAILABLE_MODELS:
+        try:
+            test_ai = TypefullyAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+
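Likewise, a minimal usage sketch for the new TypefullyAI provider, exercising the streaming path. This is a sketch only: the prompt text is illustrative, and it assumes webscout 8.0 is installed and the Typefully completion endpoint is reachable.

from webscout.Provider.typefully import TypefullyAI

ai = TypefullyAI(model="openai:gpt-4o-mini", system_prompt="You're a helpful assistant.")
# chat(stream=True) yields text chunks parsed from the 0:"..." data-stream lines
for chunk in ai.chat("Tell me a joke!", stream=True):
    print(chunk, end="", flush=True)
print()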
|