webscout 5.1__py3-none-any.whl → 5.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this release of webscout has been flagged as potentially problematic.
- webscout/AIauto.py +83 -277
- webscout/AIbase.py +106 -4
- webscout/AIutel.py +41 -10
- webscout/Agents/Onlinesearcher.py +91 -104
- webscout/Agents/__init__.py +2 -1
- webscout/Agents/ai.py +186 -0
- webscout/Agents/functioncall.py +57 -27
- webscout/Bing_search.py +73 -43
- webscout/DWEBS.py +99 -77
- webscout/Local/_version.py +1 -1
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/Chatify.py +174 -0
- webscout/Provider/Cloudflare.py +0 -4
- webscout/Provider/EDITEE.py +215 -0
- webscout/Provider/{Berlin4h.py → NetFly.py} +81 -82
- webscout/Provider/RUBIKSAI.py +11 -5
- webscout/Provider/TTI/PollinationsAI.py +138 -0
- webscout/Provider/TTI/__init__.py +2 -0
- webscout/Provider/TTI/deepinfra.py +148 -0
- webscout/Provider/TTS/__init__.py +2 -0
- webscout/Provider/TTS/streamElements.py +292 -0
- webscout/Provider/TTS/voicepod.py +118 -0
- webscout/Provider/{liaobots.py → TeachAnything.py} +31 -122
- webscout/Provider/__init__.py +14 -4
- webscout/Provider/ai4chat.py +14 -8
- webscout/Provider/cerebras.py +199 -0
- webscout/Provider/felo_search.py +28 -68
- webscout/Provider/x0gpt.py +181 -0
- webscout/__init__.py +4 -2
- webscout/exceptions.py +2 -1
- webscout/transcriber.py +195 -140
- webscout/version.py +1 -1
- {webscout-5.1.dist-info → webscout-5.3.dist-info}/METADATA +41 -82
- {webscout-5.1.dist-info → webscout-5.3.dist-info}/RECORD +38 -28
- webscout/async_providers.py +0 -21
- webscout/voice.py +0 -34
- {webscout-5.1.dist-info → webscout-5.3.dist-info}/LICENSE.md +0 -0
- {webscout-5.1.dist-info → webscout-5.3.dist-info}/WHEEL +0 -0
- {webscout-5.1.dist-info → webscout-5.3.dist-info}/entry_points.txt +0 -0
- {webscout-5.1.dist-info → webscout-5.3.dist-info}/top_level.txt +0 -0
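Most of the churn in this release is on the provider surface: several new chat providers (AI21, Chatify, Editee, cerebras, x0gpt), new TTI and TTS subpackages, two renames (Berlin4h → NetFly, liaobots → TeachAnything), and the removal of async_providers.py and voice.py. As a hedged sketch only, the new classes shown in the diffs below could be driven like this; the module paths come from the file list, the class names and `chat` signature from the diff bodies, and each class calls an unauthenticated third-party endpoint that may change or disappear:

```python
# Usage sketch, not from the diff itself: assumes webscout 5.3 is installed
# and the third-party endpoints behind these providers are still reachable.
from webscout.Provider.Chatify import Chatify   # new in 5.3
from webscout.Provider.EDITEE import Editee     # new in 5.3
from webscout.Provider.NetFly import NetFly     # renamed from Berlin4h

for ProviderCls in (Chatify, Editee, NetFly):
    ai = ProviderCls()  # each __init__ sets up a requests.Session and Conversation
    print(ProviderCls.__name__, "->", ai.chat("Say hello in five words"))
```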
webscout/Provider/Chatify.py
NEW

```diff
@@ -0,0 +1,174 @@
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+import requests
+
+class Chatify(Provider):
+    """
+    A class to interact with the Chatify AI API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful and informative AI assistant.",
+    ):
+        """
+        Initializes the Chatify AI API with given parameters.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://chatify-ai.vercel.app/api/chat"
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            'Accept': '*/*',
+            'Accept-Encoding': 'gzip, deflate, br, zstd',
+            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
+            'Content-Type': 'application/json',
+            'DNT': '1',
+            'Origin': 'https://chatify-ai.vercel.app',
+            'Referer': 'https://chatify-ai.vercel.app/',
+            'Sec-CH-UA': '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            'Sec-CH-UA-Mobile': '?0',
+            'Sec-CH-UA-Platform': '"Windows"',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-origin',
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0',
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+        self.system_prompt = system_prompt
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """
+        Sends a prompt to the Chatify API and returns the response.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        messages = []
+        if self.system_prompt:
+            messages.append({"role": "system", "content": self.system_prompt})
+        messages.append({"role": "user", "content": conversation_prompt})
+
+        payload = {
+            "messages": messages
+        }
+
+        def for_stream():
+            response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout)
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+
+            streaming_text = ""
+            for line in response.iter_lines():
+                if line:
+                    decoded_line = line.decode('utf-8')  # Decode the line
+                    parts = decoded_line.split(':', 1)
+                    if len(parts) > 1:
+                        content = parts[1].strip().strip('"')
+                        streaming_text += content
+                        yield content if raw else dict(text=streaming_text)
+            self.last_response.update(dict(text=streaming_text))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """
+        Generates a response from the Chatify API.
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts the message from the API response.
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+# Example usage
+if __name__ == "__main__":
+    from rich import print
+
+    ai = Chatify()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
```
webscout/Provider/Cloudflare.py
CHANGED

```diff
@@ -30,10 +30,6 @@ import httpx
 import cloudscraper
 
 class Cloudflare(Provider):
-    """
-    This class provides methods for interacting with the Playground AI API
-    (Cloudflare) in a consistent provider structure for webscout.
-    """
 
     AVAILABLE_MODELS = [
         "@cf/llava-hf/llava-1.5-7b-hf",
```
webscout/Provider/EDITEE.py
NEW

```diff
@@ -0,0 +1,215 @@
+import time
+import uuid
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+import click
+import requests
+from requests import get
+from uuid import uuid4
+from re import findall
+from requests.exceptions import RequestException
+from curl_cffi.requests import get, RequestsError
+import g4f
+from random import randint
+from PIL import Image
+import io
+import re
+import json
+import yaml
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation, Proxy
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from typing import Any, AsyncGenerator, Dict
+import logging
+import httpx
+import random
+proxy = Proxy()
+
+class Editee(Provider):
+    """
+    A class to interact with the Editee.com API.
+    """
+    AVAILABLE_MODELS = [
+        "gemini",  # it is gemini 1.5pro
+        "claude",  # it is claude 3.5
+        "gpt4",  # it is gpt4o
+        "mistrallarge",  # it is mistral large2
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "mistrallarge",
+    ) -> None:
+        """
+        Initializes the Editee API with given parameters.
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): AI model to use for text generation. Defaults to "gemini".
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://editee.com/submit/chatgptfree"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self._sessionValue = self._get_session()
+        self.headers = {
+            "authority": "editee.com",
+            "path": "/submit/chatgptfree",
+            "scheme": "https",
+            "accept": "application/json, text/plain, */*",
+            "accept-encoding": "gzip, deflate, br",
+            "accept-language": "ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7",
+            "content-type": "application/json",
+            "cookie": f"editeecom_session={self._sessionValue}",
+            "origin": "https://editee.com",
+            "referer": "https://editee.com/chat-gpt",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
+            "x-requested-with": "XMLHttpRequest"
+        }
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def _get_session(self):
+        """Gets the editeecom_session value."""
+        res = proxy.get("https://editee.com/chat-gpt")
+        if res.cookies.get_dict():
+            first_cookie_name, session_value = next(iter(res.cookies.get_dict().items()))
+            return session_value
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Whether to stream the response. Defaults to False.
+            raw (bool, optional): Whether to return the raw response. Defaults to False.
+            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
+        Returns:
+            The response from the API.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "context": " ",
+            "selected_model": self.model,
+            "template_id": "",
+            "user_input": conversation_prompt
+        }
+
+        response = proxy.post(self.api_endpoint, headers=self.headers, json=payload, timeout=self.timeout)
+        if not response.ok:
+            raise exceptions.FailedToGenerateResponseError(
+                f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+            )
+
+        resp = response.json()
+        self.last_response.update(dict(text=resp['text']))
+        self.conversation.update_chat_history(
+            prompt, self.get_message(self.last_response)
+        )
+        return self.last_response
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+        return self.get_message(
+            self.ask(
+                prompt,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+        )
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+if __name__ == '__main__':
+    from rich import print
+    ai = Editee()
+    response = ai.chat("tell me about india")
+    for chunk in response:
+        print(chunk, end="", flush=True)
```
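Unlike Chatify, `Editee.ask()` makes a single non-streaming POST and `chat()` ignores its `stream` flag, so the full reply arrives at once; the `model` argument is routed through the `selected_model` payload field. A usage sketch under the same third-party-service caveat (the model aliases map to the hosted models named in the diff's own comments):

```python
from webscout.Provider.EDITEE import Editee  # module path per the file list above

# "claude" is one of AVAILABLE_MODELS; an unknown name raises ValueError.
ai = Editee(model="claude")
print(ai.chat("One-line summary of HTTP/2 vs HTTP/1.1."))

# ask() returns the raw dict that chat() unwraps via get_message().
raw = ai.ask("Same question again.")
print(raw["text"])
```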
webscout/Provider/{Berlin4h.py → NetFly.py}
RENAMED

```diff
@@ -1,28 +1,28 @@
 import requests
+
+from random import randint
+
 import json
-
-from typing import Any, Dict, Optional
+
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
+from typing import Any, AsyncGenerator, Dict
 
-class Berlin4h(Provider):
+
+class NetFly(Provider):
     """
-    A class to interact with the
+    A class to interact with the NetFly API.
     """
 
+    AVAILABLE_MODELS = ["gpt-3.5-turbo"]
+
     def __init__(
         self,
-        api_token: str = "3bf369cd84339603f8a5361e964f9ebe",
-        api_endpoint: str = "https://ai.berlin4h.top/api/chat/completions",
-        model: str = "gpt-3.5-turbo",
-        temperature: float = 0.9,
-        presence_penalty: float = 0,
-        frequency_penalty: float = 0,
-        max_tokens: int = 4000,
         is_conversation: bool = True,
+        max_tokens: int = 600,
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -30,19 +30,15 @@ class Berlin4h(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-
+        model: str = "gpt-3.5-turbo",
+        system_prompt: str = "You are a helpful and friendly AI assistant.",
+    ):
         """
-        Initializes the
+        Initializes the NetFly API with given parameters.
 
         Args:
-            api_token (str): The API token for authentication.
-            api_endpoint (str): The API endpoint to use for requests.
-            model (str): The AI model to use for text generation.
-            temperature (float): The temperature parameter for the model.
-            presence_penalty (float): The presence penalty parameter for the model.
-            frequency_penalty (float): The frequency penalty parameter for the model.
-            max_tokens (int): The maximum number of tokens to generate.
             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
             timeout (int, optional): Http request timeout. Defaults to 30.
             intro (str, optional): Conversation introductory prompt. Defaults to None.
             filepath (str, optional): Path to file containing conversation history. Defaults to None.
@@ -50,22 +46,38 @@ class Berlin4h(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): AI model to use for text generation. Defaults to "gpt-3.5-turbo".
+            system_prompt (str, optional): System prompt for NetFly. Defaults to the provided string.
         """
-        self.
-
-
-        self.temperature = temperature
-        self.presence_penalty = presence_penalty
-        self.frequency_penalty = frequency_penalty
-        self.max_tokens = max_tokens
-        self.parent_message_id: Optional[str] = None
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Available model is: {self.AVAILABLE_MODELS[0]}")
+
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.
+        self.api_endpoint = "https://free.netfly.top/api/openai/v1/chat/completions"
+        self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
-        self.
+        self.model = model
+        self.system_prompt = system_prompt
+        self.headers = {
+            "accept": "application/json, text/event-stream",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://free.netfly.top",
+            "referer": "https://free.netfly.top/",
+            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+        }
+
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
@@ -92,20 +104,7 @@ class Berlin4h(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) ->
-        """
-        Sends a prompt to the Berlin4h AI API and returns the response.
-
-        Args:
-            prompt: The text prompt to generate text from.
-            stream (bool, optional): Whether to stream the response. Defaults to False.
-            raw (bool, optional): Whether to return the raw response. Defaults to False.
-            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
-            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
-
-        Returns:
-            The response from the API.
-        """
+    ) -> dict:
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -117,16 +116,17 @@ class Berlin4h(Provider):
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
 
-        payload
-        "
-
-
-
-
-
-
-
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt},
+            ],
+            "stream": True,
+            "model": self.model,
+            "temperature": 0.5,
+            "presence_penalty": 0,
+            "frequency_penalty": 0,
+            "top_p": 1
         }
 
         def for_stream():
@@ -139,27 +139,34 @@ class Berlin4h(Provider):
                     f"Failed to generate response - ({response.status_code}, {response.reason})"
                 )
 
-
-
-            for line in response.iter_lines(decode_unicode=True):
+            full_response = ""
+            for line in response.iter_lines(decode_unicode=True):
                 if line:
-
-                    json_data =
-
-
-
-
-
-
-
+                    if line.startswith("data: "):
+                        json_data = line[6:]
+                        if json_data == "[DONE]":
+                            break
+                        try:
+                            data = json.loads(json_data)
+                            content = data["choices"][0]["delta"].get("content", "")
+                            full_response += content
+                            yield content if raw else dict(text=content)
+                        except json.decoder.JSONDecodeError:
+                            continue
+
+            self.last_response.update(dict(text=full_response))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
            )
 
         def for_non_stream():
-
-
-
+            full_response = ""
+            for chunk in for_stream():
+                if isinstance(chunk, dict):
+                    full_response += chunk['text']
+                else:
+                    full_response += chunk
+            return dict(text=full_response)
 
         return for_stream() if stream else for_non_stream()
@@ -170,16 +177,6 @@ class Berlin4h(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-
         def for_stream():
             for response in self.ask(
                 prompt, True, optimizer=optimizer, conversationally=conversationally
@@ -209,9 +206,11 @@ class Berlin4h(Provider):
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
+
 if __name__ == '__main__':
     from rich import print
-    ai = Berlin4h()
-    response = ai.chat("tell me about india")
+    ai = NetFly()
+    response = ai.chat("tell me about india", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
+    print()  # Add a newline at the end
```

(Several removed lines in this file render blank or truncated, e.g. `self.` and `payload`; their full old contents were lost in the diff viewer's inline highlighting and are not recoverable here.)
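The rewritten `for_stream()` consumes an OpenAI-style SSE stream: each `data: ` line carries a chat-completion chunk whose `choices[0].delta.content` holds the next token run, and `data: [DONE]` terminates the stream. A self-contained illustration of that framing (the sample lines below are fabricated for the demo, not captured from netfly.top):

```python
import json

# Fabricated SSE lines in the shape the new NetFly parser expects.
sample_lines = [
    'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"choices": [{"delta": {"content": "lo!"}}]}',
    "data: [DONE]",
]

full_response = ""
for line in sample_lines:
    if not line.startswith("data: "):
        continue                      # ignore comments/keep-alives
    payload = line[len("data: "):]
    if payload == "[DONE]":
        break                         # end-of-stream sentinel
    chunk = json.loads(payload)
    full_response += chunk["choices"][0]["delta"].get("content", "")

print(full_response)  # -> Hello!
```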
webscout/Provider/RUBIKSAI.py
CHANGED

```diff
@@ -1,10 +1,10 @@
 import requests
 import json
 from typing import Any, Dict, Optional
-from
-from
-from
-from
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider
 from webscout import exceptions
 
 
@@ -198,4 +198,10 @@ class RUBIKSAI(Provider):
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        return response["text"]
+if __name__ == '__main__':
+    from rich import print
+    ai = RUBIKSAI()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
```

(The four removed `from` lines show only their common prefix; the original import paths were lost in extraction.)