webscout-6.9-py3-none-any.whl → webscout-7.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIbase.py +12 -2
- webscout/DWEBS.py +38 -22
- webscout/Extra/autocoder/autocoder_utiles.py +68 -7
- webscout/Extra/autollama.py +0 -16
- webscout/Extra/gguf.py +0 -13
- webscout/LLM.py +1 -1
- webscout/Provider/AISEARCH/DeepFind.py +251 -0
- webscout/Provider/AISEARCH/__init__.py +2 -2
- webscout/Provider/AISEARCH/felo_search.py +167 -118
- webscout/Provider/Blackboxai.py +136 -137
- webscout/Provider/Cloudflare.py +92 -78
- webscout/Provider/Deepinfra.py +59 -35
- webscout/Provider/Glider.py +222 -0
- webscout/Provider/Groq.py +26 -18
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/Jadve.py +108 -77
- webscout/Provider/Llama3.py +117 -94
- webscout/Provider/Marcus.py +65 -10
- webscout/Provider/Netwrck.py +61 -49
- webscout/Provider/PI.py +77 -122
- webscout/Provider/PizzaGPT.py +129 -82
- webscout/Provider/TextPollinationsAI.py +229 -0
- webscout/Provider/Youchat.py +28 -22
- webscout/Provider/__init__.py +12 -4
- webscout/Provider/askmyai.py +2 -2
- webscout/Provider/chatglm.py +205 -0
- webscout/Provider/dgaf.py +215 -0
- webscout/Provider/gaurish.py +106 -66
- webscout/Provider/hermes.py +219 -0
- webscout/Provider/llamatutor.py +72 -62
- webscout/Provider/llmchat.py +62 -35
- webscout/Provider/meta.py +6 -6
- webscout/Provider/multichat.py +205 -104
- webscout/Provider/typegpt.py +26 -23
- webscout/Provider/yep.py +3 -3
- webscout/litagent/__init__.py +3 -146
- webscout/litagent/agent.py +120 -0
- webscout/litagent/constants.py +31 -0
- webscout/tempid.py +0 -4
- webscout/version.py +1 -1
- webscout/webscout_search.py +1141 -1140
- webscout/webscout_search_async.py +635 -635
- {webscout-6.9.dist-info → webscout-7.1.dist-info}/METADATA +37 -33
- {webscout-6.9.dist-info → webscout-7.1.dist-info}/RECORD +49 -41
- {webscout-6.9.dist-info → webscout-7.1.dist-info}/WHEEL +1 -1
- webscout/Provider/AISEARCH/ooai.py +0 -155
- webscout/Provider/RUBIKSAI.py +0 -272
- {webscout-6.9.dist-info → webscout-7.1.dist-info}/LICENSE.md +0 -0
- {webscout-6.9.dist-info → webscout-7.1.dist-info}/entry_points.txt +0 -0
- {webscout-6.9.dist-info → webscout-7.1.dist-info}/top_level.txt +0 -0
webscout/Provider/Deepinfra.py
CHANGED
@@ -8,10 +8,12 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
+from webscout import LitAgent
+from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
 
 class DeepInfra(Provider):
     """
-    A class to interact with the DeepInfra API.
+    A class to interact with the DeepInfra API with logging and LitAgent user-agent.
     """
 
     def __init__(
@@ -25,13 +27,29 @@ class DeepInfra(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "Qwen/Qwen2.5-72B-Instruct",
+        model: str = "Qwen/Qwen2.5-72B-Instruct",
+        logging: bool = False
     ):
-        """Initializes the DeepInfra API client."""
+        """Initializes the DeepInfra API client with logging support."""
         self.url = "https://api.deepinfra.com/v1/openai/chat/completions"
+        # Use LitAgent for user-agent instead of hardcoded string.
         self.headers = {
-
-
+            'User-Agent': LitAgent().random(),
+            'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'Cache-Control': 'no-cache',
+            'Connection': 'keep-alive',
+            'Content-Type': 'application/json',
+            'Origin': 'https://deepinfra.com',
+            'Pragma': 'no-cache',
+            'Referer': 'https://deepinfra.com/',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-site',
+            'X-Deepinfra-Source': 'web-embed',
+            'accept': 'text/event-stream',
+            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"'
         }
         self.session = requests.Session()
         self.session.headers.update(self.headers)
@@ -61,6 +79,16 @@ class DeepInfra(Provider):
         )
         self.conversation.history_offset = history_offset
 
+        # Initialize logger if enabled
+        self.logger = LitLogger(
+            name="DeepInfra",
+            format=LogFormat.MODERN_EMOJI,
+            color_scheme=ColorScheme.CYBERPUNK
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info("DeepInfra initialized successfully")
+
     def ask(
         self,
         prompt: str,
@@ -69,14 +97,17 @@ class DeepInfra(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[Dict[str, Any], Generator]:
-
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
         # Payload construction
@@ -90,17 +121,23 @@ class DeepInfra(Provider):
         }
 
         def for_stream():
+            if self.logger:
+                self.logger.debug("Sending streaming request to DeepInfra API...")
             try:
                 with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
                     if response.status_code != 200:
-
+                        if self.logger:
+                            self.logger.error(f"Request failed with status code {response.status_code}")
 
+                        raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
+                    if self.logger:
+                        self.logger.debug(response.text)
                     streaming_text = ""
-                    for line in response.iter_lines(decode_unicode=True):
+                    for line in response.iter_lines(decode_unicode=True):
                         if line:
                             line = line.strip()
                             if line.startswith("data: "):
-                                json_str = line[6:]
+                                json_str = line[6:]  # Remove "data: " prefix
                                 if json_str == "[DONE]":
                                     break
                                 try:
@@ -110,28 +147,27 @@ class DeepInfra(Provider):
                                     if 'delta' in choice and 'content' in choice['delta']:
                                         content = choice['delta']['content']
                                         streaming_text += content
-
-                                        # Yield ONLY the new content:
-                                        resp = dict(text=content)
+                                        resp = dict(text=content)
                                         yield resp if raw else resp
                                 except json.JSONDecodeError:
-
-
+                                    if self.logger:
+                                        self.logger.error("JSON decode error in streaming data")
+                                    pass
+                    self.conversation.update_chat_history(prompt, streaming_text)
+                    if self.logger:
+                        self.logger.info("Streaming response completed successfully")
             except requests.RequestException as e:
+                if self.logger:
+                    self.logger.error(f"Request failed: {e}")
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
-
         def for_non_stream():
-            # let's make use of stream
             for _ in for_stream():
                 pass
             return self.last_response
 
-
         return for_stream() if stream else for_non_stream()
 
-
-
     def chat(
         self,
         prompt: str,
@@ -139,34 +175,22 @@ class DeepInfra(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-
        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
                yield self.get_message(response)
-
        def for_non_stream():
            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
+                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
            )
-
        return for_stream() if stream else for_non_stream()
 
    def get_message(self, response: dict) -> str:
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
 
-
-
 if __name__ == "__main__":
     from rich import print
-    ai = DeepInfra(timeout=5000)
+    ai = DeepInfra(timeout=5000, logging=True)
     response = ai.chat("write a poem about AI", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
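In summary, 7.1 replaces DeepInfra's hardcoded request headers with a full browser-style header set (User-Agent randomized via LitAgent) and threads an opt-in LitLogger through the constructor and the streaming path; with the default logging=False, self.logger stays None and every log call is skipped, so existing callers are unaffected. A minimal usage sketch of the new flag, assuming the import path follows the file location:

# Sketch only; the import path is inferred from webscout/Provider/Deepinfra.py.
from webscout.Provider.Deepinfra import DeepInfra

# logging=True activates the LitLogger added in this release; omitting it
# keeps the old, silent behaviour.
ai = DeepInfra(timeout=30, logging=True)
for chunk in ai.chat("Explain server-sent events in one sentence.", stream=True):
    print(chunk, end="", flush=True)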
webscout/Provider/Glider.py
ADDED
@@ -0,0 +1,222 @@
+import requests
+import json
+from typing import Any, Dict, Generator, Optional
+
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.Litlogger import LitLogger, LogFormat, ColorScheme
+from webscout import LitAgent as Lit
+
+class GliderAI(Provider):
+    """
+    A class to interact with the Glider.so API with comprehensive logging.
+    """
+
+    AVAILABLE_MODELS = {
+        "chat-llama-3-1-70b",
+        "chat-llama-3-1-8b",
+        "chat-llama-3-2-3b",
+        "deepseek-ai/DeepSeek-R1",
+    }
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: Optional[str] = None,
+        filepath: Optional[str] = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: Optional[str] = None,
+        model: str = "chat-llama-3-1-70b",
+        system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False
+    ):
+        """Initializes the GliderAI API client with logging capabilities."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")
+
+        self.logger = LitLogger(
+            name="GliderAI",
+            format=LogFormat.MODERN_EMOJI,
+            color_scheme=ColorScheme.CYBERPUNK
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info(f"Initializing GliderAI with model: {model}")
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://glider.so/api/chat"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "origin": "https://glider.so",
+            "referer": "https://glider.so/",
+            "user-agent": Lit().random(),
+        }
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+
+        self.__available_optimizers = (
+            method for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        if self.logger:
+            self.logger.info("GliderAI initialized successfully")
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: Optional[str] = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+        """Chat with AI with logging capabilities.
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Return raw response chunks instead of dict. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Use conversationally modified prompt when optimizer specified. Defaults to False.
+        Returns:
+            dict or Generator[dict, None, None]: The response from the API.
+        """
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
+            else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")
+
+        payload = {
+            "messages": [
+                {"role": "user", "content": conversation_prompt},
+                {"role": "system", "content": self.system_prompt}
+            ],
+            "model": self.model,
+        }
+
+        def for_stream():
+            if self.logger:
+                self.logger.debug("Initiating streaming request to API")
+            response = self.session.post(
+                self.api_endpoint, json=payload, stream=True, timeout=self.timeout
+            )
+            if not response.ok:
+                if self.logger:
+                    self.logger.error(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            streaming_text = ""
+            for value in response.iter_lines(decode_unicode=True):
+                if value:
+                    if value.startswith("data: "):
+                        try:
+                            data = json.loads(value[6:])
+                            content = data['choices'][0].get('delta', {}).get("content", "")
+                            if content:
+                                streaming_text += content
+                                yield content if raw else {"text": content}
+                        except json.JSONDecodeError:
+                            if "stop" in value:
+                                break
+            self.last_response.update(dict(text=streaming_text))
+            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+            if self.logger:
+                self.logger.debug("Response processing completed")
+
+        def for_non_stream():
+            if self.logger:
+                self.logger.debug("Processing non-streaming request")
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: Optional[str] = None,
+        conversationally: bool = False,
+    ) -> str | Generator[str, None, None]:
+        """Generate response as a string with logging.
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Use conversationally modified prompt when optimizer specified. Defaults to False.
+        Returns:
+            str or Generator[str, None, None]: The response generated.
+        """
+        if self.logger:
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response."""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    from rich import print
+    # For testing with logging enabled
+    ai = GliderAI(model="chat-llama-3-1-70b", logging=True)
+    response = ai.chat("Meaning of Life", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
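GliderAI is a new provider for the Glider.so chat endpoint: it validates model against AVAILABLE_MODELS at construction time, sends requests with a LitAgent-randomized user-agent, and parses the same "data: " SSE framing as the other providers. A short usage sketch, assuming the module is importable from its file location:

# Sketch only; import path inferred from webscout/Provider/Glider.py.
from webscout.Provider.Glider import GliderAI

ai = GliderAI(model="chat-llama-3-1-8b")

# Non-streaming: chat() drains the stream internally and returns a str.
print(ai.chat("Name one use of server-sent events."))

# Streaming with raw=True makes ask() yield bare text fragments
# instead of {"text": ...} dicts.
for fragment in ai.ask("Count to three.", stream=True, raw=True):
    print(fragment, end="", flush=True)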
webscout/Provider/Groq.py
CHANGED
@@ -16,18 +16,22 @@ class GROQ(Provider):
     """
 
     AVAILABLE_MODELS = [
-        "
-        "llama-3.1-70b-versatile",
-        "llama-3.1-8b-instant",
-        "llama3-groq-70b-8192-tool-use-preview",
-        "llama3-groq-8b-8192-tool-use-preview",
-        "llama-guard-3-8b",
+        # "whisper-large-v3",
         "llama3-70b-8192",
+        "llama-3.2-3b-preview",
+        "gemma2-9b-it",
+        "llama-3.2-11b-vision-preview",
         "llama3-8b-8192",
+        "llama-3.3-70b-versatile",
+        "deepseek-r1-distill-llama-70b",
+        # "distil-whisper-large-v3-en",
         "mixtral-8x7b-32768",
-        "
-        "
-        "
+        "llama-3.3-70b-specdec",
+        "llama-3.2-90b-vision-preview",
+        "llama-3.2-1b-preview",
+        # "whisper-large-v3-turbo",
+        "llama-3.1-8b-instant",
+        "llama-guard-3-8b"
     ]
 
     def __init__(
@@ -337,18 +341,22 @@ class AsyncGROQ(AsyncProvider):
     """
 
     AVAILABLE_MODELS = [
-        "
-        "llama-3.1-70b-versatile",
-        "llama-3.1-8b-instant",
-        "llama3-groq-70b-8192-tool-use-preview",
-        "llama3-groq-8b-8192-tool-use-preview",
-        "llama-guard-3-8b",
+        # "whisper-large-v3",
         "llama3-70b-8192",
+        "llama-3.2-3b-preview",
+        "gemma2-9b-it",
+        "llama-3.2-11b-vision-preview",
         "llama3-8b-8192",
+        "llama-3.3-70b-versatile",
+        "deepseek-r1-distill-llama-70b",
+        # "distil-whisper-large-v3-en",
         "mixtral-8x7b-32768",
-        "
-        "
-        "
+        "llama-3.3-70b-specdec",
+        "llama-3.2-90b-vision-preview",
+        "llama-3.2-1b-preview",
+        # "whisper-large-v3-turbo",
+        "llama-3.1-8b-instant",
+        "llama-guard-3-8b"
     ]
 
     def __init__(
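Both GROQ and AsyncGROQ get the same list update: the Llama 3.1 70B and tool-use preview aliases are dropped, and Llama 3.2/3.3 variants plus deepseek-r1-distill-llama-70b are added (the Whisper entries remain commented out). Code pinned to a removed name will now fail model validation; a small defensive sketch, assuming AVAILABLE_MODELS stays a plain class attribute as shown:

# Sketch only; checks a requested model against the updated list before use.
from webscout.Provider.Groq import GROQ  # import path inferred from the file

requested = "llama-3.1-70b-versatile"  # removed in 7.1
if requested not in GROQ.AVAILABLE_MODELS:
    requested = "llama-3.3-70b-versatile"  # closest surviving alias (assumption)
print(f"Using model: {requested}")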
webscout/Provider/HF_space/__init__.py
ADDED
File without changes
webscout/Provider/HF_space/qwen_qwen2.py
ADDED
@@ -0,0 +1,206 @@
+from dataclasses import dataclass
+from enum import Enum, auto
+import requests
+import json
+import re
+import uuid
+from typing import List, Dict, Generator, Optional, Any, TypedDict, Literal, Union, Final
+
+# Type definitions
+class Role(Enum):
+    SYSTEM = "system"
+    USER = "user"
+    ASSISTANT = "assistant"
+
+class Message(TypedDict):
+    role: str
+    content: str
+
+class APIResponse(TypedDict):
+    event_id: str
+    fn_index: int
+    data: List[Any]
+
+class StreamData(TypedDict):
+    msg: str
+    output: Dict[str, Any]
+
+@dataclass
+class APIConfig:
+    url: Final[str] = "https://qwen-qwen2-72b-instruct.hf.space"
+    api_endpoint: Final[str] = "https://qwen-qwen2-72b-instruct.hf.space/queue/join?"
+
+@dataclass
+class RequestHeaders:
+    join: Dict[str, str]
+    data: Dict[str, str]
+
+    @classmethod
+    def create_default(cls, base_url: str) -> 'RequestHeaders':
+        common_headers = {
+            'accept-language': 'en-US,en;q=0.9',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
+        }
+
+        return cls(
+            join={
+                **common_headers,
+                'accept': '*/*',
+                'content-type': 'application/json',
+                'origin': base_url,
+                'referer': f'{base_url}/',
+            },
+            data={
+                **common_headers,
+                'accept': 'text/event-stream',
+                'referer': f'{base_url}/',
+            }
+        )
+
+class QwenAPI:
+    def __init__(self, config: APIConfig = APIConfig()):
+        self.config = config
+        self.headers = RequestHeaders.create_default(config.url)
+
+    @staticmethod
+    def generate_session_hash() -> str:
+        """Generate a unique session hash."""
+        return str(uuid.uuid4()).replace('-', '')[:12]
+
+    @staticmethod
+    def format_prompt(messages: List[Message]) -> str:
+        """
+        Formats a list of messages into a single prompt string.
+
+        Args:
+            messages: A list of message dictionaries with "role" and "content" keys.
+
+        Returns:
+            str: The formatted prompt.
+        """
+        return "\n".join(f"{message['role']}: {message['content']}" for message in messages)
+
+    def create_sync_generator(
+        self,
+        model: str,
+        messages: List[Message],
+        proxy: Optional[str] = None,
+        **kwargs: Any
+    ) -> Generator[str, None, None]:
+        """
+        Synchronously streams responses from the Qwen_Qwen2_72B_Instruct API.
+
+        Args:
+            model: The model to use for the request.
+            messages: A list of message dictionaries with "role" and "content" keys.
+            proxy: Optional proxy URL for the request.
+            **kwargs: Additional keyword arguments.
+
+        Yields:
+            str: Text chunks from the API response.
+
+        Raises:
+            requests.exceptions.RequestException: If the API request fails.
+            json.JSONDecodeError: If the response cannot be parsed as JSON.
+        """
+        session_hash: str = self.generate_session_hash()
+
+        # Prepare the prompt
+        system_messages: List[str] = [
+            message["content"]
+            for message in messages
+            if message["role"] == Role.SYSTEM.value
+        ]
+        system_prompt: str = "\n".join(system_messages)
+
+        user_messages: List[Message] = [
+            message
+            for message in messages
+            if message["role"] != Role.SYSTEM.value
+        ]
+        prompt: str = self.format_prompt(user_messages)
+
+        payload_join: Dict[str, Any] = {
+            "data": [prompt, [], system_prompt],
+            "event_data": None,
+            "fn_index": 0,
+            "trigger_id": 11,
+            "session_hash": session_hash
+        }
+
+        with requests.Session() as session:
+            # Send join request
+            response = session.post(
+                self.config.api_endpoint,
+                headers=self.headers.join,
+                json=payload_join
+            )
+            response.raise_for_status()
+            event_data: APIResponse = response.json()
+
+            # Prepare data stream request
+            url_data: str = f'{self.config.url}/queue/data'
+            params_data: Dict[str, str] = {'session_hash': session_hash}
+
+            # Send data stream request
+            full_response: str = ""
+            final_full_response: str = ""
+
+            with session.get(
+                url_data,
+                headers=self.headers.data,
+                params=params_data,
+                stream=True
+            ) as response:
+                response.raise_for_status()
+
+                for line in response.iter_lines():
+                    if line:
+                        decoded_line: str = line.decode('utf-8')
+                        if decoded_line.startswith('data: '):
+                            try:
+                                json_data: StreamData = json.loads(decoded_line[6:])
+
+                                if json_data.get('msg') == 'process_generating':
+                                    if 'output' in json_data and 'data' in json_data['output']:
+                                        output_data: List[Any] = json_data['output']['data']
+                                        if len(output_data) > 1 and len(output_data[1]) > 0:
+                                            for item in output_data[1]:
+                                                if isinstance(item, list) and len(item) > 1:
+                                                    fragment: str = str(item[1])
+                                                    if not re.match(r'^\[.*\]$', fragment) and not full_response.endswith(fragment):
+                                                        full_response += fragment
+                                                        yield fragment
+
+                                if json_data.get('msg') == 'process_completed':
+                                    if 'output' in json_data and 'data' in json_data['output']:
+                                        output_data = json_data['output']['data']
+                                        if len(output_data) > 1 and len(output_data[1]) > 0:
+                                            final_full_response = output_data[1][0][1]
+
+                                            if final_full_response.startswith(full_response):
+                                                final_full_response = final_full_response[len(full_response):]
+
+                                            if final_full_response:
+                                                yield final_full_response
+                                    break
+
+                            except json.JSONDecodeError as e:
+                                print(f"Could not parse JSON: {decoded_line}")
+                                raise e
+
+
+def main() -> None:
+    messages: List[Message] = [
+        {"role": Role.SYSTEM.value, "content": "You are a helpful assistant."},
+        {"role": Role.USER.value, "content": "LOL"}
+    ]
+
+    api = QwenAPI()
+    for text in api.create_sync_generator("qwen-qwen2-72b-instruct", messages):
+        print(text, end="", flush=True)
+    print("\n---\n")
+
+
+if __name__ == "__main__":
+    main()
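Unlike the HTTP providers above, this module speaks the Gradio queue protocol of a Hugging Face Space: it POSTs the prompt to /queue/join under a random session_hash, then reads the /queue/data event stream, yielding fragments from process_generating events and stopping at process_completed. A usage sketch, assuming the module is importable from its file location (the diff does not show it being re-exported anywhere):

# Sketch only; import path inferred from webscout/Provider/HF_space/qwen_qwen2.py.
from webscout.Provider.HF_space.qwen_qwen2 import QwenAPI, Role

api = QwenAPI()
messages = [
    {"role": Role.SYSTEM.value, "content": "Answer in one sentence."},
    {"role": Role.USER.value, "content": "What is a session hash used for here?"},
]
# create_sync_generator() joins the queue, then streams text fragments.
for fragment in api.create_sync_generator("qwen-qwen2-72b-instruct", messages):
    print(fragment, end="", flush=True)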