webscout-8.3.2-py3-none-any.whl → webscout-8.3.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +367 -41
- webscout/Bard.py +2 -22
- webscout/Bing_search.py +1 -2
- webscout/Provider/AISEARCH/__init__.py +1 -0
- webscout/Provider/AISEARCH/scira_search.py +24 -11
- webscout/Provider/AISEARCH/stellar_search.py +132 -0
- webscout/Provider/Deepinfra.py +75 -57
- webscout/Provider/ExaChat.py +93 -63
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/FreeGemini.py +2 -2
- webscout/Provider/Gemini.py +3 -10
- webscout/Provider/GeminiProxy.py +31 -5
- webscout/Provider/HeckAI.py +85 -80
- webscout/Provider/Jadve.py +56 -50
- webscout/Provider/LambdaChat.py +39 -31
- webscout/Provider/MiniMax.py +207 -0
- webscout/Provider/Nemotron.py +41 -13
- webscout/Provider/Netwrck.py +39 -59
- webscout/Provider/OLLAMA.py +8 -9
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
- webscout/Provider/OPENAI/MiniMax.py +298 -0
- webscout/Provider/OPENAI/README.md +31 -30
- webscout/Provider/OPENAI/TogetherAI.py +4 -17
- webscout/Provider/OPENAI/__init__.py +4 -2
- webscout/Provider/OPENAI/autoproxy.py +753 -18
- webscout/Provider/OPENAI/base.py +7 -76
- webscout/Provider/OPENAI/copilot.py +73 -26
- webscout/Provider/OPENAI/deepinfra.py +96 -132
- webscout/Provider/OPENAI/exachat.py +9 -5
- webscout/Provider/OPENAI/flowith.py +179 -166
- webscout/Provider/OPENAI/friendli.py +233 -0
- webscout/Provider/OPENAI/monochat.py +329 -0
- webscout/Provider/OPENAI/netwrck.py +4 -7
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/qodo.py +630 -0
- webscout/Provider/OPENAI/scirachat.py +82 -49
- webscout/Provider/OPENAI/textpollinations.py +13 -12
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OPENAI/typegpt.py +4 -4
- webscout/Provider/OPENAI/utils.py +19 -42
- webscout/Provider/OPENAI/x0gpt.py +14 -2
- webscout/Provider/OpenGPT.py +54 -32
- webscout/Provider/PI.py +58 -84
- webscout/Provider/Qodo.py +454 -0
- webscout/Provider/StandardInput.py +32 -13
- webscout/Provider/TTI/README.md +9 -9
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/aiarta.py +92 -78
- webscout/Provider/TTI/infip.py +212 -0
- webscout/Provider/TTI/monochat.py +220 -0
- webscout/Provider/TeachAnything.py +11 -3
- webscout/Provider/TextPollinationsAI.py +91 -82
- webscout/Provider/TogetherAI.py +32 -48
- webscout/Provider/Venice.py +37 -46
- webscout/Provider/VercelAI.py +27 -24
- webscout/Provider/WiseCat.py +35 -35
- webscout/Provider/WrDoChat.py +22 -26
- webscout/Provider/WritingMate.py +26 -22
- webscout/Provider/__init__.py +6 -6
- webscout/Provider/copilot.py +58 -61
- webscout/Provider/freeaichat.py +64 -55
- webscout/Provider/granite.py +48 -57
- webscout/Provider/koala.py +51 -39
- webscout/Provider/learnfastai.py +49 -64
- webscout/Provider/llmchat.py +79 -93
- webscout/Provider/llmchatco.py +63 -78
- webscout/Provider/monochat.py +275 -0
- webscout/Provider/multichat.py +51 -40
- webscout/Provider/oivscode.py +1 -1
- webscout/Provider/scira_chat.py +257 -104
- webscout/Provider/scnet.py +13 -13
- webscout/Provider/searchchat.py +13 -13
- webscout/Provider/sonus.py +12 -11
- webscout/Provider/toolbaz.py +25 -8
- webscout/Provider/turboseek.py +41 -42
- webscout/Provider/typefully.py +27 -12
- webscout/Provider/typegpt.py +43 -48
- webscout/Provider/uncovr.py +55 -90
- webscout/Provider/x0gpt.py +325 -299
- webscout/Provider/yep.py +79 -96
- webscout/__init__.py +7 -2
- webscout/auth/__init__.py +12 -1
- webscout/auth/providers.py +27 -5
- webscout/auth/routes.py +146 -105
- webscout/auth/server.py +367 -312
- webscout/client.py +121 -116
- webscout/litagent/Readme.md +68 -55
- webscout/litagent/agent.py +99 -9
- webscout/version.py +1 -1
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/METADATA +102 -91
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/RECORD +95 -107
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/OPENAI/freeaichat.py +0 -363
- webscout/Provider/TTI/fastflux.py +0 -233
- webscout/Provider/Writecream.py +0 -246
- webscout/auth/static/favicon.svg +0 -11
- webscout/auth/swagger_ui.py +0 -203
- webscout/auth/templates/components/authentication.html +0 -237
- webscout/auth/templates/components/base.html +0 -103
- webscout/auth/templates/components/endpoints.html +0 -750
- webscout/auth/templates/components/examples.html +0 -491
- webscout/auth/templates/components/footer.html +0 -75
- webscout/auth/templates/components/header.html +0 -27
- webscout/auth/templates/components/models.html +0 -286
- webscout/auth/templates/components/navigation.html +0 -70
- webscout/auth/templates/static/api.js +0 -455
- webscout/auth/templates/static/icons.js +0 -168
- webscout/auth/templates/static/main.js +0 -784
- webscout/auth/templates/static/particles.js +0 -201
- webscout/auth/templates/static/styles.css +0 -3353
- webscout/auth/templates/static/ui.js +0 -374
- webscout/auth/templates/swagger_ui.html +0 -170
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
webscout/Provider/MiniMax.py
ADDED
@@ -0,0 +1,207 @@
+import os
+import json
+import requests
+from typing import Any, Dict, Optional, Union, Generator
+from webscout.AIutel import sanitize_stream, Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+class MiniMax(Provider):
+    """
+    Provider for MiniMax-Reasoning-01 API, following the standard provider interface.
+    """
+    AVAILABLE_MODELS = ["MiniMax-Reasoning-01"]
+    API_URL = "https://api.minimaxi.chat/v1/text/chatcompletion_v2"
+    # TODO: Move API_KEY to env/config for security
+    API_KEY = os.environ.get("MINIMAX_API_KEY") or """eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJHcm91cE5hbWUiOiJtbyBuaSIsIlVzZXJOYW1lIjoibW8gbmkiLCJBY2NvdW50IjoiIiwiU3ViamVjdElEIjoiMTg3NjIwMDY0ODA2NDYzNTI0MiIsIlBob25lIjoiIiwiR3JvdXBJRCI6IjE4NzYyMDA2NDgwNjA0NDA5MzgiLCJQYWdlTmFtZSI6IiIsIk1haWwiOiJuaW1vQHN1YnN1cC52aXAiLCJDcmVhdGVUaW1lIjoiMjAyNS0wMS0wNyAxMToyNzowNyIsIlRva2VuVHlwZSI6MSwiaXNzIjoibWluaW1heCJ9.Ge1ZnpFPUfXVdMini0P_qXbP_9VYwzXiffG9DsNQck4GtYEOs33LDeAiwrVsrrLZfvJ2icQZ4sRZS54wmPuWua_Dav6pYJty8ZtahmUX1IuhlUX5YErhhCRAIy3J1xB8FkLHLyylChuBHpkNz6O6BQLmPqmoa-cOYK9Qrc6IDeu8SX1iMzO9-MSkcWNvkvpCF2Pf9tekBVWNKMDK6IZoMEPbtkaPXdDyP6l0M0e2AlL_E0oM9exg3V-ohAi8OTPFyqM6dcd4TwF-b9DULxfIsRFw401mvIxcTDWa42u2LULewdATVRD2BthU65tuRqEiWeFWMvFlPj2soMze_QIiUA"""
+    MODEL_CONTROL_DEFAULTS = {"tokens_to_generate": 40000, "temperature": 1, "top_p": 0.95}
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "MiniMax-Reasoning-01",
+        system_prompt: str = "You are a helpful assistant, always respond in english",
+    ):
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+        self.model = model
+        self.api_url = self.API_URL
+        self.api_key = self.API_KEY
+        self.timeout = timeout
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.last_response = {}
+        self.system_prompt = system_prompt
+        self.proxies = proxies
+        self.__available_optimizers = tuple(
+            method for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    @staticmethod
+    def _extract_content(chunk: Any) -> Optional[dict]:
+        if not isinstance(chunk, dict):
+            return None
+        choice = chunk.get('choices', [{}])[0]
+        delta = choice.get('delta', {})
+        content = delta.get('content')
+        reasoning = delta.get('reasoning_content')
+        result = {}
+        if content:
+            result['content'] = content
+        if reasoning:
+            result['reasoning_content'] = reasoning
+        return result if result else None
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = True,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+        messages = [
+            {'role': 'system', 'content': self.system_prompt},
+            {'role': 'user', 'content': conversation_prompt}
+        ]
+        data = {
+            'model': self.model,
+            'messages': messages,
+            'stream': True,
+            'max_tokens': self.MODEL_CONTROL_DEFAULTS.get('tokens_to_generate', 512),
+            'temperature': self.MODEL_CONTROL_DEFAULTS.get('temperature', 1.0),
+            'top_p': self.MODEL_CONTROL_DEFAULTS.get('top_p', 1.0),
+        }
+        headers = {
+            'Content-Type': 'application/json',
+            'Authorization': f'Bearer {self.api_key}',
+        }
+        def for_stream():
+            try:
+                response = requests.post(
+                    self.api_url,
+                    headers=headers,
+                    data=json.dumps(data),
+                    stream=True,
+                    timeout=self.timeout,
+                    proxies=self.proxies if self.proxies else None
+                )
+                if not response.ok:
+                    raise exceptions.FailedToGenerateResponseError(
+                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    )
+                streaming_response = ""
+                last_content = ""
+                last_reasoning = ""
+                in_think = False
+                processed_stream = sanitize_stream(
+                    response.iter_lines(),
+                    intro_value="data:",
+                    to_json=True,
+                    content_extractor=self._extract_content,
+                    raw=False  # always process as dict for logic
+                )
+                for chunk in processed_stream:
+                    if not chunk:
+                        continue
+                    content = chunk.get('content') if isinstance(chunk, dict) else None
+                    reasoning = chunk.get('reasoning_content') if isinstance(chunk, dict) else None
+                    # Handle reasoning_content with <think> tags
+                    if reasoning and reasoning != last_reasoning:
+                        if not in_think:
+                            yield "<think>\n\n"
+                            in_think = True
+                        yield reasoning
+                        last_reasoning = reasoning
+                    # If we were in <think> and now have new content, close <think>
+                    if in_think and content and content != last_content:
+                        yield "</think>\n\n"
+                        in_think = False
+                    # Handle normal content
+                    if content and content != last_content:
+                        yield content
+                        streaming_response += content
+                        last_content = content
+                if not raw:
+                    self.last_response = {"text": streaming_response}
+                    self.conversation.update_chat_history(prompt, streaming_response)
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+        def for_non_stream():
+            full_response = ""
+            for chunk in for_stream():
+                if isinstance(chunk, dict) and "text" in chunk:
+                    full_response += chunk["text"]
+                elif isinstance(chunk, str):
+                    full_response += chunk
+            if not raw:
+                self.last_response = {"text": full_response}
+                self.conversation.update_chat_history(prompt, full_response)
+                return {"text": full_response}
+            else:
+                return full_response
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = True,
+        optimizer: str = None,
+        conversationally: bool = False,
+        raw: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
+            ):
+                if raw:
+                    yield response
+                else:
+                    yield response
+        def for_non_stream():
+            result = self.ask(
+                prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally
+            )
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response.get("text", "")
+
+if __name__ == "__main__":
+    ai = MiniMax()
+    resp = ai.chat("What is the capital of France?", stream=True, raw=False)
+    for chunk in resp:
+        print(chunk, end="", flush=True)
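A note for consumers of this new provider: when streaming, MiniMax's reasoning tokens are interleaved with the answer and bracketed by literal "<think>" / "</think>" sentinel chunks (see the for_stream logic above). A minimal sketch of how a caller might separate the two streams; the splitting loop is illustrative, not part of the package:

from webscout.Provider.MiniMax import MiniMax

ai = MiniMax()
reasoning, answer, in_think = [], [], False
for chunk in ai.chat("What is 17 * 23?", stream=True):
    # The provider yields "<think>\n\n" and "</think>\n\n" as standalone chunks.
    if chunk.startswith("<think>"):
        in_think = True
    elif chunk.startswith("</think>"):
        in_think = False
    else:
        (reasoning if in_think else answer).append(chunk)
print("reasoning:", "".join(reasoning))
print("answer:", "".join(answer))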
webscout/Provider/Nemotron.py
CHANGED
@@ -110,7 +110,8 @@ class NEMOTRON(Provider):
     def _make_request(
         self,
         message: str,
-        stream: bool = False
+        stream: bool = False,
+        raw: bool = False
     ) -> Generator[str, None, None]:
         """Make request to NEMOTRON API."""
         payload = {
@@ -131,10 +132,26 @@ class NEMOTRON(Provider):
                     timeout=self.timeout
                 ) as response:
                     response.raise_for_status()
-
-
-
-
+                    buffer = ""
+                    chunk_size = 32
+                    for chunk in response.iter_content(chunk_size=chunk_size):
+                        if not chunk:
+                            continue
+                        text = chunk.decode(errors="ignore")
+                        buffer += text
+                        while len(buffer) >= chunk_size:
+                            out = buffer[:chunk_size]
+                            buffer = buffer[chunk_size:]
+                            if out.strip():
+                                if raw:
+                                    yield out
+                                else:
+                                    yield out
+                    if buffer.strip():
+                        if raw:
+                            yield buffer
+                        else:
+                            yield buffer
             else:
                 response = self.session.post(
                     self.url,
@@ -143,7 +160,10 @@ class NEMOTRON(Provider):
                     timeout=self.timeout
                 )
                 response.raise_for_status()
-
+                if raw:
+                    yield response.text
+                else:
+                    yield response.text

         except requests.exceptions.RequestException as e:
             raise exceptions.ProviderConnectionError(f"Connection error: {str(e)}")
@@ -167,13 +187,20 @@ class NEMOTRON(Provider):
                 raise ValueError(f"Optimizer is not one of {self.__available_optimizers}")

         def for_stream():
-            for text in self._make_request(conversation_prompt, stream=True):
-
+            for text in self._make_request(conversation_prompt, stream=True, raw=raw):
+                if raw:
+                    yield text
+                else:
+                    yield {"text": text}

         def for_non_stream():
-            response_text = next(self._make_request(conversation_prompt, stream=False))
-
-
+            response_text = next(self._make_request(conversation_prompt, stream=False, raw=raw))
+            if raw:
+                self.last_response = response_text
+                return response_text
+            else:
+                self.last_response = {"text": response_text}
+                return self.last_response

         return for_stream() if stream else for_non_stream()
@@ -214,5 +241,6 @@ class NEMOTRON(Provider):
 if __name__ == "__main__":
     # Example usage
     nemotron = NEMOTRON()
-    response = nemotron.chat("
-
+    response = nemotron.chat("write me about humans in points", stream=True)
+    for part in response:
+        print(part, end="", flush=True)
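Both this change and the Netwrck one below introduce the same streaming fix: each byte chunk is decoded, accumulated in a string buffer, and re-emitted as fixed 32-character slices, with any undersized tail flushed at the end. A self-contained sketch of that re-chunking pattern, written outside webscout for clarity:

from typing import Generator, Iterable

def rechunk(byte_chunks: Iterable[bytes], chunk_size: int = 32) -> Generator[str, None, None]:
    """Re-emit an arbitrary byte stream as fixed-size text slices."""
    buffer = ""
    for chunk in byte_chunks:
        if not chunk:
            continue
        buffer += chunk.decode(errors="ignore")
        while len(buffer) >= chunk_size:
            out, buffer = buffer[:chunk_size], buffer[chunk_size:]
            if out.strip():
                yield out
    if buffer.strip():  # flush the undersized tail
        yield buffer

print(list(rechunk([b"hello ", b"world, this is a longer stream"], 8)))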
webscout/Provider/Netwrck.py
CHANGED
@@ -14,23 +14,20 @@ class Netwrck(Provider):
     greeting = """Hello! I'm a helpful assistant. How can I help you today?"""

     AVAILABLE_MODELS = [
-        "
-        "x-ai/grok-2",
-        "anthropic/claude-3-7-sonnet-20250219",
+        "thedrummer/valkyrie-49b-v1",
         "sao10k/l3-euryale-70b",
+        "deepseek/deepseek-chat",
+        "deepseek/deepseek-r1",
+        "anthropic/claude-sonnet-4-20250514",
         "openai/gpt-4.1-mini",
         "gryphe/mythomax-l2-13b",
-        "google/gemini-pro-1.5",
         "google/gemini-2.5-flash-preview-04-17",
         "nvidia/llama-3.1-nemotron-70b-instruct",
-        "deepseek/deepseek-r1",
-        "deepseek/deepseek-chat"
-
     ]

     def __init__(
         self,
-        model: str = "anthropic/claude-
+        model: str = "anthropic/claude-sonnet-4-20250514",
         is_conversation: bool = True,
         max_tokens: int = 4096, # Note: max_tokens is not used by this API
         timeout: int = 30,
@@ -127,75 +124,58 @@ class Netwrck(Provider):

         def for_stream():
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     "https://netwrck.com/api/chatpred_or",
                     json=payload,
-                    # headers are set on the session
-                    # proxies are set on the session
                     timeout=self.timeout,
                     stream=True,
-                    impersonate="chrome110"
+                    impersonate="chrome110"
                 )
-                response.raise_for_status()
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                response.raise_for_status()
+                buffer = ""
+                chunk_size = 32
+                for chunk in response.iter_content(chunk_size=chunk_size):
+                    if not chunk:
+                        continue
+                    text = chunk.decode(errors="ignore")
+                    buffer += text
+                    while len(buffer) >= chunk_size:
+                        out = buffer[:chunk_size]
+                        buffer = buffer[chunk_size:]
+                        if out.strip():
+                            if raw:
+                                yield out
+                            else:
+                                yield {"text": out}
+                if buffer.strip():
+                    if raw:
+                        yield buffer
+                    else:
+                        yield {"text": buffer}
+                self.last_response = {"text": buffer}
+                self.conversation.update_chat_history(payload["query"], buffer)
+            except CurlError as e:
                 raise exceptions.ProviderConnectionError(f"Network error (CurlError): {str(e)}") from e
-            except Exception as e:
+            except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.ProviderConnectionError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e

         def for_non_stream():
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     "https://netwrck.com/api/chatpred_or",
                     json=payload,
-                    # headers are set on the session
-                    # proxies are set on the session
                     timeout=self.timeout,
-                    impersonate="chrome110"
+                    impersonate="chrome110"
                 )
-                response.raise_for_status()
-
-
-
-
-
-                    data=response_text_raw,
-                    intro_value=None,
-                    to_json=False,
-                    content_extractor=self._netwrck_extractor
-                )
-                # Aggregate the single result
-                text = "".join(list(processed_stream))
-
-                self.last_response = {"text": text} # Store processed text
-                self.conversation.update_chat_history(prompt, text)
-
-                # Return dict or raw string
-                return text if raw else self.last_response
-
-            except CurlError as e: # Catch CurlError
+                response.raise_for_status()
+                response_text_raw = response.text
+                self.last_response = {"text": response_text_raw}
+                self.conversation.update_chat_history(prompt, response_text_raw)
+                return response_text_raw if raw else self.last_response
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Network error (CurlError): {str(e)}") from e
-            except Exception as e:
+            except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e
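With the sanitize_stream post-processing removed, the non-stream path now stores and returns the raw response body directly. A quick smoke test of the updated provider, assuming the standard webscout chat() surface and that the public endpoint is reachable:

from webscout.Provider.Netwrck import Netwrck

bot = Netwrck(model="anthropic/claude-sonnet-4-20250514", timeout=30)

# Non-stream: the full reply text comes back in one piece.
print(bot.chat("Say hi in five words", stream=False))

# Stream: fixed 32-character slices are yielded as they arrive.
for piece in bot.chat("Count to ten", stream=True):
    print(piece, end="", flush=True)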
webscout/Provider/OLLAMA.py
CHANGED
@@ -1,14 +1,13 @@
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from
-
-
-from ollama import AsyncClient, Client, ResponseError
-
-
-from pathlib import Path
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from typing import AsyncGenerator, Dict, List, Optional, Union
+
+try:
+    from ollama import AsyncClient, Client, ResponseError
+except ImportError as e:
+    pass

 class OLLAMA(Provider):
     def __init__(
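This guard means `import webscout` no longer hard-fails when the optional ollama package is missing, though the bare pass leaves AsyncClient and Client undefined until first use. A common variant of the pattern (a sketch, not what this diff ships) defers a clearer error to construction time:

# Guarded optional dependency: import lazily, fail loudly only on use.
try:
    from ollama import Client
except ImportError:
    Client = None

class OllamaBackend:
    """Hypothetical wrapper illustrating the deferred-failure pattern."""

    def __init__(self, host: str = "http://localhost:11434"):
        if Client is None:
            raise ImportError("Install the 'ollama' package to use this provider: pip install ollama")
        self.client = Client(host=host)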
|