webscout 8.3.2__py3-none-any.whl → 8.3.4__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +367 -41
- webscout/Bard.py +2 -22
- webscout/Bing_search.py +1 -2
- webscout/Provider/AISEARCH/__init__.py +1 -0
- webscout/Provider/AISEARCH/scira_search.py +24 -11
- webscout/Provider/AISEARCH/stellar_search.py +132 -0
- webscout/Provider/Deepinfra.py +75 -57
- webscout/Provider/ExaChat.py +93 -63
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/FreeGemini.py +2 -2
- webscout/Provider/Gemini.py +3 -10
- webscout/Provider/GeminiProxy.py +31 -5
- webscout/Provider/HeckAI.py +85 -80
- webscout/Provider/Jadve.py +56 -50
- webscout/Provider/LambdaChat.py +39 -31
- webscout/Provider/MiniMax.py +207 -0
- webscout/Provider/Nemotron.py +41 -13
- webscout/Provider/Netwrck.py +39 -59
- webscout/Provider/OLLAMA.py +8 -9
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
- webscout/Provider/OPENAI/MiniMax.py +298 -0
- webscout/Provider/OPENAI/README.md +31 -30
- webscout/Provider/OPENAI/TogetherAI.py +4 -17
- webscout/Provider/OPENAI/__init__.py +4 -2
- webscout/Provider/OPENAI/autoproxy.py +753 -18
- webscout/Provider/OPENAI/base.py +7 -76
- webscout/Provider/OPENAI/copilot.py +73 -26
- webscout/Provider/OPENAI/deepinfra.py +96 -132
- webscout/Provider/OPENAI/exachat.py +9 -5
- webscout/Provider/OPENAI/flowith.py +179 -166
- webscout/Provider/OPENAI/friendli.py +233 -0
- webscout/Provider/OPENAI/monochat.py +329 -0
- webscout/Provider/OPENAI/netwrck.py +4 -7
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/qodo.py +630 -0
- webscout/Provider/OPENAI/scirachat.py +82 -49
- webscout/Provider/OPENAI/textpollinations.py +13 -12
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OPENAI/typegpt.py +4 -4
- webscout/Provider/OPENAI/utils.py +19 -42
- webscout/Provider/OPENAI/x0gpt.py +14 -2
- webscout/Provider/OpenGPT.py +54 -32
- webscout/Provider/PI.py +58 -84
- webscout/Provider/Qodo.py +454 -0
- webscout/Provider/StandardInput.py +32 -13
- webscout/Provider/TTI/README.md +9 -9
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/aiarta.py +92 -78
- webscout/Provider/TTI/infip.py +212 -0
- webscout/Provider/TTI/monochat.py +220 -0
- webscout/Provider/TeachAnything.py +11 -3
- webscout/Provider/TextPollinationsAI.py +91 -82
- webscout/Provider/TogetherAI.py +32 -48
- webscout/Provider/Venice.py +37 -46
- webscout/Provider/VercelAI.py +27 -24
- webscout/Provider/WiseCat.py +35 -35
- webscout/Provider/WrDoChat.py +22 -26
- webscout/Provider/WritingMate.py +26 -22
- webscout/Provider/__init__.py +6 -6
- webscout/Provider/copilot.py +58 -61
- webscout/Provider/freeaichat.py +64 -55
- webscout/Provider/granite.py +48 -57
- webscout/Provider/koala.py +51 -39
- webscout/Provider/learnfastai.py +49 -64
- webscout/Provider/llmchat.py +79 -93
- webscout/Provider/llmchatco.py +63 -78
- webscout/Provider/monochat.py +275 -0
- webscout/Provider/multichat.py +51 -40
- webscout/Provider/oivscode.py +1 -1
- webscout/Provider/scira_chat.py +257 -104
- webscout/Provider/scnet.py +13 -13
- webscout/Provider/searchchat.py +13 -13
- webscout/Provider/sonus.py +12 -11
- webscout/Provider/toolbaz.py +25 -8
- webscout/Provider/turboseek.py +41 -42
- webscout/Provider/typefully.py +27 -12
- webscout/Provider/typegpt.py +43 -48
- webscout/Provider/uncovr.py +55 -90
- webscout/Provider/x0gpt.py +325 -299
- webscout/Provider/yep.py +79 -96
- webscout/__init__.py +7 -2
- webscout/auth/__init__.py +12 -1
- webscout/auth/providers.py +27 -5
- webscout/auth/routes.py +146 -105
- webscout/auth/server.py +367 -312
- webscout/client.py +121 -116
- webscout/litagent/Readme.md +68 -55
- webscout/litagent/agent.py +99 -9
- webscout/version.py +1 -1
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/METADATA +102 -91
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/RECORD +95 -107
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/OPENAI/freeaichat.py +0 -363
- webscout/Provider/TTI/fastflux.py +0 -233
- webscout/Provider/Writecream.py +0 -246
- webscout/auth/static/favicon.svg +0 -11
- webscout/auth/swagger_ui.py +0 -203
- webscout/auth/templates/components/authentication.html +0 -237
- webscout/auth/templates/components/base.html +0 -103
- webscout/auth/templates/components/endpoints.html +0 -750
- webscout/auth/templates/components/examples.html +0 -491
- webscout/auth/templates/components/footer.html +0 -75
- webscout/auth/templates/components/header.html +0 -27
- webscout/auth/templates/components/models.html +0 -286
- webscout/auth/templates/components/navigation.html +0 -70
- webscout/auth/templates/static/api.js +0 -455
- webscout/auth/templates/static/icons.js +0 -168
- webscout/auth/templates/static/main.js +0 -784
- webscout/auth/templates/static/particles.js +0 -201
- webscout/auth/templates/static/styles.css +0 -3353
- webscout/auth/templates/static/ui.js +0 -374
- webscout/auth/templates/swagger_ui.html +0 -170
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
webscout/Provider/Qodo.py
ADDED

@@ -0,0 +1,454 @@
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+from typing import Any, Dict, Optional, Generator, Union
+import uuid
+import json
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class QodoAI(Provider):
+    """
+    A class to interact with the Qodo AI API.
+    """
+
+    AVAILABLE_MODELS = [
+        "gpt-4.1",
+        "gpt-4o",
+        "o3",
+        "o4-mini",
+        "claude-4-sonnet",
+        "gemini-2.5-pro",
+
+    ]
+
+    @staticmethod
+    def _qodo_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Qodo stream JSON objects."""
+        if isinstance(chunk, dict):
+            data = chunk.get("data", {})
+            if isinstance(data, dict):
+                tool_args = data.get("tool_args", {})
+                if isinstance(tool_args, dict):
+                    return tool_args.get("content")
+        return None
+
+    def __init__(
+        self,
+        api_key: str = None,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "claude-4-sonnet",
+        browser: str = "chrome"
+    ):
+        """Initializes the Qodo AI API client."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.url = "https://api.cli.qodo.ai/v2/agentic/start-task"
+        self.info_url = "https://api.cli.qodo.ai/v2/info/get-things"
+
+        # Initialize LitAgent for user agent generation
+        self.agent = LitAgent()
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Store API key
+        self.api_key = api_key or "sk-dS7U-extxMWUxc8SbYYOuncqGUIE8-y2OY8oMCpu0eI-qnSUyH9CYWO_eAMpqwfMo7pXU3QNrclfZYMO0M6BJTM"
+
+        # Generate session ID dynamically from API
+        self.session_id = self._get_session_id()
+        self.request_id = str(uuid.uuid4())
+
+        # Use the fingerprint for headers
+        self.headers = {
+            "Accept": "text/plain",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Authorization": f"Bearer {self.api_key}",
+            "Connection": "close",
+            "Content-Type": "application/json",
+            "host": "api.cli.qodo.ai",
+            "Request-id": self.request_id,
+            "User-Agent": self.fingerprint["user_agent"],
+        }
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+        # Add Session-id to headers after getting it from API
+        self.headers["Session-id"] = self.session_id
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept-Language": self.fingerprint["accept_language"],
+            "User-Agent": self.fingerprint["user_agent"],
+        })
+
+        # Update session headers
+        for header, value in self.headers.items():
+            self.session.headers[header] = value
+
+        return self.fingerprint
+
+    def _build_payload(self, prompt: str):
+        """Build the payload for Qodo AI API."""
+        return {
+            "agent_type": "cli",
+            "session_id": self.session_id,
+            "user_data": {
+                "extension_version": "0.7.2",
+                "os_platform": "win32",
+                "os_version": "v23.9.0",
+                "editor_type": "cli"
+            },
+            "tools": {
+                "web_search": [
+                    {
+                        "name": "web_search",
+                        "description": "Searches the web and returns results based on the user's query (Powered by Nimble).",
+                        "inputSchema": {
+                            "type": "object",
+                            "properties": {
+                                "llm_description": {
+                                    "default": "Searches the web and returns results based on the user's query.",
+                                    "title": "Llm Description",
+                                    "type": "string"
+                                },
+                                "query": {
+                                    "description": "The search query to execute",
+                                    "title": "Query",
+                                    "type": "string"
+                                }
+                            },
+                            "required": ["query"],
+                            "title": "NimbleWebSearch"
+                        },
+                        "be_tool": True,
+                        "autoApproved": True
+                    },
+                    {
+                        "name": "web_fetch",
+                        "description": "Fetches content from a given URL (Powered by Nimble).",
+                        "inputSchema": {
+                            "type": "object",
+                            "properties": {
+                                "llm_description": {
+                                    "default": "Fetches content from a given URL.",
+                                    "title": "Llm Description",
+                                    "type": "string"
+                                },
+                                "url": {
+                                    "description": "The URL to fetch content from",
+                                    "title": "Url",
+                                    "type": "string"
+                                }
+                            },
+                            "required": ["url"],
+                            "title": "NimbleWebFetch"
+                        },
+                        "be_tool": True,
+                        "autoApproved": True
+                    }
+                ]
+            },
+            # "projects_root_path": ["C:\\Users\\koula"],
+            # "cwd": "C:\\Users\\koula",
+            "user_request": prompt,
+            "execution_strategy": "act",
+            "custom_model": self.model,
+            "stream": True
+        }
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        payload = self._build_payload(conversation_prompt)
+        payload["stream"] = stream
+
+        def for_stream():
+            try:
+                response = self.session.post(
+                    self.url,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate=self.fingerprint.get("browser_type", "chrome110")
+                )
+                if response.status_code == 401:
+                    raise exceptions.FailedToGenerateResponseError(
+                        "Invalid API key. You need to provide your own API key.\n"
+                        "Usage: QodoAI(api_key='your_api_key_here')\n"
+                        "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
+                    )
+                elif response.status_code != 200:
+                    raise exceptions.FailedToGenerateResponseError(f"HTTP {response.status_code}: {response.text}")
+
+                streaming_text = ""
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value="",
+                    to_json=True,
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._qodo_extractor,
+                    yield_raw_on_error=True,
+                    raw=raw
+                )
+                for content_chunk in processed_stream:
+                    if content_chunk:
+                        yield content_chunk if raw else {"text": content_chunk}
+                        if not raw:
+                            streaming_text += content_chunk
+
+                self.last_response = {"text": streaming_text}
+                self.conversation.update_chat_history(prompt, streaming_text)
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
+
+        def for_non_stream():
+            try:
+                payload["stream"] = False
+                response = self.session.post(
+                    self.url,
+                    json=payload,
+                    timeout=self.timeout,
+                    impersonate=self.fingerprint.get("browser_type", "chrome110")
+                )
+                if response.status_code == 401:
+                    raise exceptions.FailedToGenerateResponseError(
+                        "Invalid API key. You need to provide your own API key.\n"
+                        "Usage: QodoAI(api_key='your_api_key_here')\n"
+                        "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
+                    )
+                elif response.status_code != 200:
+                    raise exceptions.FailedToGenerateResponseError(f"HTTP {response.status_code}: {response.text}")
+
+                response_text = response.text
+                processed_stream = sanitize_stream(
+                    data=response_text.splitlines(),
+                    intro_value=None,
+                    to_json=True,
+                    content_extractor=self._qodo_extractor,
+                    yield_raw_on_error=True,
+                    raw=raw
+                )
+                full_response = ""
+                for content in processed_stream:
+                    if content:
+                        full_response += content
+
+                self.last_response = {"text": full_response}
+                self.conversation.update_chat_history(prompt, full_response)
+                return {"text": full_response} if not raw else full_response
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e}")
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        raw: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream():
+            for response in self.ask(
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
+            ):
+                if raw:
+                    yield response
+                else:
+                    yield response.get("text", "")
+
+        def for_non_stream():
+            result = self.ask(
+                prompt, False, raw=raw, optimizer=optimizer, conversationally=conversationally
+            )
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        text = response.get("text", "")
+        return text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+    def _get_session_id(self) -> str:
+        """Get session ID from Qodo API."""
+        try:
+            # Create temporary session for the info request
+            temp_session = Session()
+            temp_headers = {
+                "Accept": "text/plain",
+                "Accept-Encoding": "gzip, deflate, br",
+                "Authorization": f"Bearer {self.api_key}",
+                "Connection": "close",
+                "Content-Type": "application/json",
+                "host": "api.cli.qodo.ai",
+                "Request-id": str(uuid.uuid4()),
+                "User-Agent": self.fingerprint["user_agent"] if hasattr(self, 'fingerprint') else "axios/1.10.0",
+            }
+            temp_session.headers.update(temp_headers)
+
+            response = temp_session.get(
+                self.info_url,
+                timeout=self.timeout if hasattr(self, 'timeout') else 30,
+                impersonate="chrome110"
+            )
+
+            if response.status_code == 200:
+                data = response.json()
+                session_id = data.get("session-id")
+                if session_id:
+                    return session_id
+            elif response.status_code == 401:
+                # API key is invalid
+                raise exceptions.FailedToGenerateResponseError(
+                    "Invalid API key. You need to provide your own API key.\n"
+                    "Usage: QodoAI(api_key='your_api_key_here')\n"
+                    "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
+                )
+            else:
+                # Other HTTP errors
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to authenticate with Qodo API (HTTP {response.status_code}). "
+                    "You may need to provide your own API key.\n"
+                    "Usage: QodoAI(api_key='your_api_key_here')\n"
+                    "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
+                )
+
+            # Fallback to generated session ID if API call fails
+            return f"20250630-{str(uuid.uuid4())}"
+
+        except exceptions.FailedToGenerateResponseError:
+            # Re-raise our custom exceptions
+            raise
+        except Exception as e:
+            # For other errors, show the API key message
+            raise exceptions.FailedToGenerateResponseError(
+                f"Failed to connect to Qodo API: {e}\n"
+                "You may need to provide your own API key.\n"
+                "Usage: QodoAI(api_key='your_api_key_here')\n"
+                "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
+            )

+    def refresh_session(self):
+        """
+        Refreshes the session ID by calling the Qodo API.
+
+        Returns:
+            str: The new session ID
+        """
+        old_session_id = self.session_id
+        self.session_id = self._get_session_id()
+
+        # Update headers with new session ID
+        self.headers["Session-id"] = self.session_id
+        self.session.headers["Session-id"] = self.session_id
+
+        return self.session_id
+
+    def get_available_models(self) -> Dict[str, Any]:
+        """
+        Get available models and info from Qodo API.
+
+        Returns:
+            Dict containing models, default_model, version, and session info
+        """
+        try:
+            response = self.session.get(
+                self.info_url,
+                timeout=self.timeout,
+                impersonate=self.fingerprint.get("browser_type", "chrome110")
+            )
+
+            if response.status_code == 200:
+                return response.json()
+            elif response.status_code == 401:
+                raise exceptions.FailedToGenerateResponseError(
+                    "Invalid API key. You need to provide your own API key.\n"
+                    "Usage: QodoAI(api_key='your_api_key_here')\n"
+                    "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(f"Failed to get models: HTTP {response.status_code}")
+
+        except CurlError as e:
+            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Failed to get models ({type(e).__name__}): {e}")
+
+
+if __name__ == "__main__":
+    ai = QodoAI() # u will need to give your API key here to get api install qodo cli via https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart
+    response = ai.chat("write a poem about india", raw=False, stream=True)
+    for chunk in response:
+        print(chunk, end='', flush=True)
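The new `QodoAI` class follows the package's usual `Provider` interface (`ask`, `chat`, `get_message`). A minimal usage sketch based only on the code above; the API key value and prompts are placeholders, and the direct module import path is taken from the file list rather than from any documented public API:

```python
from webscout.Provider.Qodo import QodoAI  # module path taken from the file list above

# Placeholder key: per the error messages in the class, you need your own Qodo API key
# (obtained by installing the Qodo CLI); the bundled default key is not guaranteed to work.
ai = QodoAI(api_key="your_api_key_here", model="claude-4-sonnet")

# Non-streaming: chat() returns the full reply as a single string.
print(ai.chat("Summarize what this provider does."))

# Streaming: chat(stream=True) yields text chunks as they arrive,
# mirroring the __main__ block at the bottom of the file.
for chunk in ai.chat("write a poem about india", stream=True):
    print(chunk, end="", flush=True)
```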
webscout/Provider/StandardInput.py
CHANGED

@@ -1,7 +1,7 @@
 from curl_cffi.requests import Session
 import uuid
 import re
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, Generator, Optional, Union
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
@@ -166,7 +166,8 @@ class StandardInputAI(Provider):
         prompt: str,
         optimizer: str = None,
         conversationally: bool = False,
-
+        raw: bool = False, # Added raw parameter
+    ) -> Union[Dict[str, Any], Generator[str, None, None]]:
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -233,9 +234,12 @@ class StandardInputAI(Provider):
             for content_chunk in processed_stream:
                 if content_chunk and isinstance(content_chunk, str):
                     full_response += content_chunk
-
+                    if raw:
+                        yield content_chunk
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
+            if raw:
+                return full_response
             return {"text": full_response}
         except Exception as e:
             raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
@@ -245,17 +249,32 @@ class StandardInputAI(Provider):
         prompt: str,
         optimizer: str = None,
         conversationally: bool = False,
-
-
-
-
+        raw: bool = False, # Added raw parameter
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream():
+            gen = self.ask(
+                prompt, optimizer=optimizer, conversationally=conversationally, raw=raw
             )
-
-
-
-
-
-
+            if hasattr(gen, '__iter__') and not isinstance(gen, dict):
+                for chunk in gen:
+                    if raw:
+                        yield chunk
+                    else:
+                        yield self.get_message({"text": chunk})
+            else:
+                if raw:
+                    yield gen if isinstance(gen, str) else self.get_message(gen)
+                else:
+                    yield self.get_message(gen)
+        def for_non_stream():
+            result = self.ask(
+                prompt, optimizer=optimizer, conversationally=conversationally, raw=raw
+            )
+            if raw:
+                return result if isinstance(result, str) else self.get_message(result)
+            else:
+                return self.get_message(result)
+        return for_stream() if raw else for_non_stream()
 
 if __name__ == "__main__":
     print("-" * 100)
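For reference, the effect of the new `raw` flag on `StandardInputAI.chat`, sketched only from the hunks above (constructor arguments are assumed to have workable defaults):

```python
from webscout.Provider.StandardInput import StandardInputAI  # module path from the file list

ai = StandardInputAI()  # assumes the default constructor arguments are sufficient

# Default (raw=False): chat() returns the final message as one string.
print(ai.chat("Hello there"))

# raw=True: chat() now returns a generator that yields chunks as they are produced.
for chunk in ai.chat("Hello there", raw=True):
    print(chunk, end="", flush=True)
```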
webscout/Provider/TTI/README.md
CHANGED

@@ -14,14 +14,14 @@ These providers allow you to easily generate AI‑created art from text prompts
 
 ## 📦 Supported Providers
 
-| Provider
-
-| `AIArta`
-| `
-| `MagicStudioAI`
-| `PixelMuse`
-| `PiclumenAI`
-| `PollinationsAI
+| Provider | Available Models (examples) |
+| ---------------- | ----------------------------------------- |
+| `AIArta` | `flux`, `medieval`, `dreamshaper_xl`, ... |
+| `InfipAI` | `img3`, `img4`, `uncen` |
+| `MagicStudioAI` | `magicstudio` |
+| `PixelMuse` | `flux-schnell`, `imagen-3`, `recraft-v3` |
+| `PiclumenAI` | `piclumen-v1` |
+| `PollinationsAI` | `flux`, `turbo`, `gptimage` |
 
 > **Note**: Some providers require the `Pillow` package for image processing.
 
@@ -71,7 +71,7 @@ response = client.images.create(
 ## 🔧 Provider Specifics
 
 - **AIArta** – Uses Firebase authentication tokens and supports many tattoo‑style models.
-- **
+- **InfipAI** – Offers various models for different image styles.
 - **MagicStudioAI** – Generates images through MagicStudio's public endpoint.
 - **PixelMuse** – Supports several models and converts images from WebP.
 - **PiclumenAI** – Returns JPEG images directly from the API.

webscout/Provider/TTI/__init__.py
CHANGED

@@ -1,10 +1,11 @@
 from .pollinations import *
 from .piclumen import *
 from .magicstudio import *
-from .fastflux import *
 from .pixelmuse import *
 from .aiarta import *
 from .gpt1image import *
 from .imagen import *
 from .together import *
 from .bing import *
+from .infip import *
+from .monochat import *
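The two new imports mean the added text-to-image modules are re-exported from the `webscout.Provider.TTI` package. A minimal sketch; the `InfipAI` name comes from the README table above, and whether it is exactly what `infip.py` exports is an assumption:

```python
# After this change the new TTI modules load with the package; the exported
# class name (InfipAI) is assumed from the README table, not verified here.
from webscout.Provider.TTI import InfipAI
```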