webscout 8.3.3__py3-none-any.whl → 8.3.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +53 -800
- webscout/Bard.py +2 -22
- webscout/Provider/AISEARCH/__init__.py +11 -10
- webscout/Provider/AISEARCH/felo_search.py +7 -3
- webscout/Provider/AISEARCH/scira_search.py +26 -11
- webscout/Provider/AISEARCH/stellar_search.py +53 -8
- webscout/Provider/Deepinfra.py +81 -57
- webscout/Provider/ExaChat.py +9 -5
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/FreeGemini.py +2 -2
- webscout/Provider/Gemini.py +3 -10
- webscout/Provider/GeminiProxy.py +31 -5
- webscout/Provider/LambdaChat.py +39 -31
- webscout/Provider/Netwrck.py +5 -8
- webscout/Provider/OLLAMA.py +8 -9
- webscout/Provider/OPENAI/README.md +1 -1
- webscout/Provider/OPENAI/TogetherAI.py +57 -48
- webscout/Provider/OPENAI/TwoAI.py +94 -1
- webscout/Provider/OPENAI/__init__.py +1 -3
- webscout/Provider/OPENAI/autoproxy.py +1 -1
- webscout/Provider/OPENAI/copilot.py +73 -26
- webscout/Provider/OPENAI/deepinfra.py +60 -24
- webscout/Provider/OPENAI/exachat.py +9 -5
- webscout/Provider/OPENAI/monochat.py +3 -3
- webscout/Provider/OPENAI/netwrck.py +4 -7
- webscout/Provider/OPENAI/qodo.py +630 -0
- webscout/Provider/OPENAI/scirachat.py +86 -49
- webscout/Provider/OPENAI/textpollinations.py +19 -14
- webscout/Provider/OPENAI/venice.py +1 -0
- webscout/Provider/Perplexitylabs.py +163 -147
- webscout/Provider/Qodo.py +478 -0
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/monochat.py +3 -3
- webscout/Provider/TTI/together.py +7 -6
- webscout/Provider/TTI/venice.py +368 -0
- webscout/Provider/TextPollinationsAI.py +19 -14
- webscout/Provider/TogetherAI.py +57 -44
- webscout/Provider/TwoAI.py +96 -2
- webscout/Provider/TypliAI.py +33 -27
- webscout/Provider/UNFINISHED/PERPLEXED_search.py +254 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
- webscout/Provider/Venice.py +1 -0
- webscout/Provider/WiseCat.py +18 -20
- webscout/Provider/__init__.py +4 -10
- webscout/Provider/copilot.py +58 -61
- webscout/Provider/freeaichat.py +64 -55
- webscout/Provider/monochat.py +275 -0
- webscout/Provider/scira_chat.py +115 -21
- webscout/Provider/toolbaz.py +5 -10
- webscout/Provider/typefully.py +1 -11
- webscout/Provider/x0gpt.py +325 -315
- webscout/__init__.py +4 -11
- webscout/auth/__init__.py +19 -4
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/auth_system.py +25 -40
- webscout/auth/config.py +105 -6
- webscout/auth/database.py +377 -22
- webscout/auth/models.py +185 -130
- webscout/auth/request_processing.py +175 -11
- webscout/auth/routes.py +119 -5
- webscout/auth/server.py +9 -2
- webscout/auth/simple_logger.py +236 -0
- webscout/sanitize.py +1074 -0
- webscout/version.py +1 -1
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/METADATA +9 -150
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/RECORD +70 -72
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
- webscout/Provider/OPENAI/freeaichat.py +0 -363
- webscout/Provider/OPENAI/typegpt.py +0 -368
- webscout/Provider/OPENAI/uncovrAI.py +0 -477
- webscout/Provider/WritingMate.py +0 -273
- webscout/Provider/typegpt.py +0 -284
- webscout/Provider/uncovr.py +0 -333
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/WHEEL +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.3.dist-info → webscout-8.3.5.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/qodo.py (new file)
@@ -0,0 +1,630 @@
+"""
+QodoAI Provider for OpenAI-compatible API
+
+This module provides a QodoAI implementation that follows the OpenAI API interface.
+QodoAI offers access to various models including GPT-4, Claude, and others through a unified API.
+
+Usage:
+    from webscout.Provider.OPENAI.qodo import QodoAI
+
+    # Initialize with API key
+    client = QodoAI(api_key="your_qodo_api_key_here")
+
+    # List available models
+    models = client.models
+    print("Available models:", models)
+
+    # Create chat completion
+    response = client.chat.completions.create(
+        model="claude-4-sonnet",
+        messages=[{"role": "user", "content": "Hello, how are you?"}],
+        stream=False
+    )
+    print(response.choices[0].message.content)
+
+    # Streaming example
+    for chunk in client.chat.completions.create(
+        model="gpt-4o",
+        messages=[{"role": "user", "content": "Count from 1 to 5"}],
+        stream=True
+    ):
+        if chunk.choices[0].delta.content:
+            print(chunk.choices[0].delta.content, end="")
+
+Getting API Key:
+    To get a QodoAI API key, follow the instructions at:
+    https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart
+"""
+
+import json
+import time
+import uuid
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import curl_cffi for improved request handling
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+
+# Import base classes and utility structures
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions, Tool
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage, ModelData, ModelList
+)
+
+# Import webscout utilities
+from webscout.AIutel import sanitize_stream
+from webscout import exceptions
+
+# Attempt to import LitAgent, fallback if not available
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    LitAgent = None
+
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'QodoAI'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, Any]],
+        max_tokens: Optional[int] = 2049,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        tools: Optional[List[Union[Tool, Dict[str, Any]]]] = None,
+        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Get the last user message for the prompt
+        user_prompt = ""
+        for message in reversed(messages):
+            if message.get("role") == "user":
+                user_prompt = message.get("content", "")
+                break
+
+        if not user_prompt:
+            raise ValueError("No user message found in messages")
+
+        # Build payload for Qodo API
+        payload = self._client._build_payload(user_prompt, model)
+        payload["stream"] = stream
+        payload["custom_model"] = model
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload, user_prompt)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload, user_prompt)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], user_prompt: str
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.url,
+                json=payload,
+                stream=True,
+                timeout=self._client.timeout,
+                impersonate=self._client.fingerprint.get("browser_type", "chrome110")
+            )
+
+            if response.status_code == 401:
+                raise exceptions.FailedToGenerateResponseError(
+                    "Invalid API key. You need to provide your own API key.\n"
+                    "Usage: QodoAI(api_key='your_api_key_here')\n"
+                    "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
+                )
+            elif response.status_code != 200:
+                raise IOError(f"Qodo request failed with status code {response.status_code}: {response.text}")
+
+            # Track token usage
+            prompt_tokens = len(user_prompt.split())
+            completion_tokens = 0
+
+            processed_stream = sanitize_stream(
+                data=response.iter_content(chunk_size=None),
+                intro_value="",
+                to_json=True,
+                skip_markers=["[DONE]"],
+                content_extractor=QodoAI._qodo_extractor,
+                yield_raw_on_error=True,
+                raw=False
+            )
+
+            for content_chunk in processed_stream:
+                if content_chunk:
+                    completion_tokens += len(content_chunk.split())
+
+                    # Create the delta object
+                    delta = ChoiceDelta(
+                        content=content_chunk,
+                        role="assistant"
+                    )
+
+                    # Create the choice object
+                    choice = Choice(
+                        index=0,
+                        delta=delta,
+                        finish_reason=None
+                    )
+
+                    # Create the chunk object
+                    chunk = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model,
+                        usage={
+                            "prompt_tokens": prompt_tokens,
+                            "completion_tokens": completion_tokens,
+                            "total_tokens": prompt_tokens + completion_tokens
+                        }
+                    )
+
+                    yield chunk
+
+            # Send final chunk with finish_reason
+            final_choice = Choice(
+                index=0,
+                delta=ChoiceDelta(),
+                finish_reason="stop"
+            )
+
+            final_chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[final_choice],
+                created=created_time,
+                model=model,
+                usage={
+                    "prompt_tokens": prompt_tokens,
+                    "completion_tokens": completion_tokens,
+                    "total_tokens": prompt_tokens + completion_tokens
+                }
+            )
+
+            yield final_chunk
+
+        except CurlError as e:
+            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], user_prompt: str
+    ) -> ChatCompletion:
+        try:
+            payload["stream"] = False
+            response = self._client.session.post(
+                self._client.url,
+                json=payload,
+                timeout=self._client.timeout,
+                impersonate=self._client.fingerprint.get("browser_type", "chrome110")
+            )
+
+            if response.status_code == 401:
+                raise exceptions.FailedToGenerateResponseError(
+                    "Invalid API key. You need to provide your own API key.\n"
+                    "Usage: QodoAI(api_key='your_api_key_here')\n"
+                    "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
+                )
+            elif response.status_code != 200:
+                raise IOError(f"Qodo request failed with status code {response.status_code}: {response.text}")
+
+            response_text = response.text
+
+            # Parse multiple JSON objects from the response
+            full_response = ""
+
+            # Try to split by line breaks and parse each potential JSON object
+            lines = response_text.replace('}\n{', '}\n{').split('\n')
+            json_objects = []
+
+            current_json = ""
+            brace_count = 0
+
+            for line in lines:
+                line = line.strip()
+                if line:
+                    current_json += line
+
+                    # Count braces to detect complete JSON objects
+                    brace_count += line.count('{') - line.count('}')
+
+                    if brace_count == 0 and current_json:
+                        json_objects.append(current_json)
+                        current_json = ""
+
+            # Add any remaining JSON
+            if current_json and brace_count == 0:
+                json_objects.append(current_json)
+
+            for json_str in json_objects:
+                if json_str.strip():
+                    try:
+                        json_obj = json.loads(json_str)
+                        content = QodoAI._qodo_extractor(json_obj)
+                        if content:
+                            full_response += content
+                    except json.JSONDecodeError:
+                        # Silently skip malformed JSON
+                        pass
+
+            # Calculate token usage
+            prompt_tokens = len(user_prompt.split())
+            completion_tokens = len(full_response.split())
+            total_tokens = prompt_tokens + completion_tokens
+
+            # Create the message object
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_response
+            )
+
+            # Create the choice object
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+
+            # Create the usage object
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage
+            )
+
+            return completion
+
+        except CurlError as e:
+            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e}")
+
+
+class Chat(BaseChat):
+    def __init__(self, client: 'QodoAI'):
+        self.completions = Completions(client)
+
+
+class QodoAI(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for Qodo AI API.
+
+    Usage:
+        client = QodoAI(api_key="your_api_key")
+        response = client.chat.completions.create(
+            model="claude-4-sonnet",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+    """
+
+    AVAILABLE_MODELS = [
+        "gpt-4.1",
+        "gpt-4o",
+        "o3",
+        "o4-mini",
+        "claude-4-sonnet",
+        "gemini-2.5-pro"
+    ]
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        tools: Optional[List[Tool]] = None,
+        proxies: Optional[dict] = None,
+        disable_auto_proxy: bool = False,
+        timeout: int = 30,
+        browser: str = "chrome",
+        **kwargs: Any
+    ):
+        # Initialize parent class
+        super().__init__(api_key=api_key, tools=tools, proxies=proxies, disable_auto_proxy=disable_auto_proxy, **kwargs)
+
+        self.url = "https://api.cli.qodo.ai/v2/agentic/start-task"
+        self.info_url = "https://api.cli.qodo.ai/v2/info/get-things"
+        self.timeout = timeout
+        # Store API key
+        self.api_key = api_key or "sk-dS7U-extxMWUxc8SbYYOuncqGUIE8-y2OY8oMCpu0eI-qnSUyH9CYWO_eAMpqwfMo7pXU3QNrclfZYMO0M6BJTM"
+
+        # Initialize LitAgent for user agent generation
+        if LitAgent:
+            self.agent = LitAgent()
+            self.fingerprint = self.agent.generate_fingerprint(browser)
+        else:
+            self.fingerprint = {"user_agent": "axios/1.10.0", "browser_type": "chrome"}
+
+        # Generate session ID dynamically from API
+        self.session_id = self._get_session_id()
+        self.request_id = str(uuid.uuid4())
+
+        # Use the fingerprint for headers
+        self.headers = {
+            "Accept": "text/plain",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint.get("accept_language", "en-US,en;q=0.9"),
+            "Authorization": f"Bearer {self.api_key}",
+            "Connection": "close",
+            "Content-Type": "application/json",
+            "host": "api.cli.qodo.ai",
+            "Request-id": self.request_id,
+            "Session-id": self.session_id,
+            "User-Agent": self.fingerprint["user_agent"],
+        }
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+        self.session.headers.update(self.headers)
+        if proxies:
+            self.session.proxies.update(proxies)
+
+        # Initialize OpenAI-compatible interface
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        """Property that returns an object with a .list() method returning available models."""
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+    @staticmethod
+    def _qodo_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Qodo stream JSON objects."""
+        if isinstance(chunk, dict):
+            # Check for Qodo's specific response format
+            data = chunk.get("data", {})
+            if isinstance(data, dict):
+                tool_args = data.get("tool_args", {})
+                if isinstance(tool_args, dict):
+                    content = tool_args.get("content")
+                    if content:
+                        return content
+
+                # Check for direct content in data
+                if "content" in data:
+                    return data["content"]
+
+            # Check for OpenAI-like format (choices)
+            if "choices" in chunk:
+                choices = chunk["choices"]
+                if isinstance(choices, list) and len(choices) > 0:
+                    choice = choices[0]
+                    if isinstance(choice, dict):
+                        # Check delta content
+                        delta = choice.get("delta", {})
+                        if isinstance(delta, dict) and "content" in delta:
+                            return delta["content"]
+
+                        # Check message content
+                        message = choice.get("message", {})
+                        if isinstance(message, dict) and "content" in message:
+                            return message["content"]
+
+        elif isinstance(chunk, str):
+            # Try to parse as JSON if it's a string
+            try:
+                parsed = json.loads(chunk)
+                return QodoAI._qodo_extractor(parsed)
+            except json.JSONDecodeError:
+                # If it's not JSON, it might be direct content
+                if chunk.strip():
+                    return chunk.strip()
+
+        return None
+
+    def _get_session_id(self) -> str:
+        """Get session ID from Qodo API."""
+        try:
+            # Create temporary session for the info request
+            temp_session = Session()
+            temp_headers = {
+                "Accept": "text/plain",
+                "Accept-Encoding": "gzip, deflate, br",
+                "Authorization": f"Bearer {self.api_key}",
+                "Connection": "close",
+                "Content-Type": "application/json",
+                "host": "api.cli.qodo.ai",
+                "Request-id": str(uuid.uuid4()),
+                "User-Agent": self.fingerprint.get("user_agent", "axios/1.10.0"),
+            }
+            temp_session.headers.update(temp_headers)
+
+            response = temp_session.get(
+                self.info_url,
+                timeout=self.timeout,
+                impersonate="chrome110"
+            )
+
+            if response.status_code == 200:
+                data = response.json()
+                session_id = data.get("session-id")
+                if session_id:
+                    return session_id
+            elif response.status_code == 401:
+                # API key is invalid
+                raise exceptions.FailedToGenerateResponseError(
+                    "Invalid API key. You need to provide your own API key.\n"
+                    "Usage: QodoAI(api_key='your_api_key_here')\n"
+                    "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
+                )
+            else:
+                # Other HTTP errors
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to authenticate with Qodo API (HTTP {response.status_code}). "
+                    "You may need to provide your own API key.\n"
+                    "Usage: QodoAI(api_key='your_api_key_here')\n"
+                    "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
+                )
+
+            # Fallback to generated session ID if API call fails
+            return f"20250630-{str(uuid.uuid4())}"
+
+        except exceptions.FailedToGenerateResponseError:
+            # Re-raise our custom exceptions
+            raise
+        except Exception as e:
+            # For other errors, show the API key message
+            raise exceptions.FailedToGenerateResponseError(
+                f"Failed to connect to Qodo API: {e}\n"
+                "You may need to provide your own API key.\n"
+                "Usage: QodoAI(api_key='your_api_key_here')\n"
+                "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
+            )

+    def _build_payload(self, prompt: str, model: str = "claude-4-sonnet"):
+        """Build the payload for Qodo AI API."""
+        return {
+            "agent_type": "cli",
+            "session_id": self.session_id,
+            "user_data": {
+                "extension_version": "0.7.2",
+                "os_platform": "win32",
+                "os_version": "v23.9.0",
+                "editor_type": "cli"
+            },
+            "tools": {
+                "web_search": [
+                    {
+                        "name": "web_search",
+                        "description": "Searches the web and returns results based on the user's query (Powered by Nimble).",
+                        "inputSchema": {
+                            "type": "object",
+                            "properties": {
+                                "llm_description": {
+                                    "default": "Searches the web and returns results based on the user's query.",
+                                    "title": "Llm Description",
+                                    "type": "string"
+                                },
+                                "query": {
+                                    "description": "The search query to execute",
+                                    "title": "Query",
+                                    "type": "string"
+                                }
+                            },
+                            "required": ["query"],
+                            "title": "NimbleWebSearch"
+                        },
+                        "be_tool": True,
+                        "autoApproved": True
+                    },
+                    {
+                        "name": "web_fetch",
+                        "description": "Fetches content from a given URL (Powered by Nimble).",
+                        "inputSchema": {
+                            "type": "object",
+                            "properties": {
+                                "llm_description": {
+                                    "default": "Fetches content from a given URL.",
+                                    "title": "Llm Description",
+                                    "type": "string"
+                                },
+                                "url": {
+                                    "description": "The URL to fetch content from",
+                                    "title": "Url",
+                                    "type": "string"
+                                }
+                            },
+                            "required": ["url"],
+                            "title": "NimbleWebFetch"
+                        },
+                        "be_tool": True,
+                        "autoApproved": True
+                    }
+                ]
+            },
+            "user_request": prompt,
+            "execution_strategy": "act",
+            "custom_model": model,
+            "stream": True
+        }
+
+    def get_available_models(self) -> Dict[str, Any]:
+        """
+        Get available models and info from Qodo API.
+
+        Returns:
+            Dict containing models, default_model, version, and session info
+        """
+        try:
+            response = self.session.get(
+                self.info_url,
+                timeout=self.timeout,
+                impersonate=self.fingerprint.get("browser_type", "chrome110")
+            )
+
+            if response.status_code == 200:
+                return response.json()
+            elif response.status_code == 401:
+                raise exceptions.FailedToGenerateResponseError(
+                    "Invalid API key. You need to provide your own API key.\n"
+                    "Usage: QodoAI(api_key='your_api_key_here')\n"
+                    "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(f"Failed to get models: HTTP {response.status_code}")
+
+        except CurlError as e:
+            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Failed to get models ({type(e).__name__}): {e}")
+
+
+if __name__ == "__main__":
+    # Example usage
+    client = QodoAI()  # You will need to provide your API key here
+
+    # List available models
+    models = client.models.list()
+    print("Available models:")
+    for model in models:
+        print(f" - {model}")
+
+    # Create a chat completion
+    response = client.chat.completions.create(
+        model="claude-4-sonnet",
+        messages=[
+            {"role": "user", "content": "Write a short poem about AI"}
+        ],
+        stream=False
+    )
+
+    print(f"\nResponse: {response}")
+
+    # Example with streaming
+    print("\nStreaming response:")
+    stream = client.chat.completions.create(
+        model="claude-4-sonnet",
+        messages=[
+            {"role": "user", "content": "Count from 1 to 5"}
+        ],
+        stream=True
+    )
+
+    for chunk in stream:
+        if chunk.choices[0].delta.content:
+            print(chunk.choices[0].delta.content, end='', flush=True)
+    print()