webscout 8.3.3__py3-none-any.whl → 8.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's advisory page for more details.

Files changed (46)
  1. webscout/AIutel.py +221 -4
  2. webscout/Bard.py +2 -22
  3. webscout/Provider/AISEARCH/scira_search.py +24 -11
  4. webscout/Provider/Deepinfra.py +75 -57
  5. webscout/Provider/ExaChat.py +9 -5
  6. webscout/Provider/Flowith.py +1 -1
  7. webscout/Provider/FreeGemini.py +2 -2
  8. webscout/Provider/Gemini.py +3 -10
  9. webscout/Provider/GeminiProxy.py +31 -5
  10. webscout/Provider/LambdaChat.py +39 -31
  11. webscout/Provider/Netwrck.py +5 -8
  12. webscout/Provider/OLLAMA.py +8 -9
  13. webscout/Provider/OPENAI/README.md +1 -1
  14. webscout/Provider/OPENAI/__init__.py +1 -1
  15. webscout/Provider/OPENAI/autoproxy.py +1 -1
  16. webscout/Provider/OPENAI/copilot.py +73 -26
  17. webscout/Provider/OPENAI/deepinfra.py +54 -24
  18. webscout/Provider/OPENAI/exachat.py +9 -5
  19. webscout/Provider/OPENAI/monochat.py +3 -3
  20. webscout/Provider/OPENAI/netwrck.py +4 -7
  21. webscout/Provider/OPENAI/qodo.py +630 -0
  22. webscout/Provider/OPENAI/scirachat.py +82 -49
  23. webscout/Provider/OPENAI/textpollinations.py +13 -12
  24. webscout/Provider/OPENAI/typegpt.py +3 -3
  25. webscout/Provider/Qodo.py +454 -0
  26. webscout/Provider/TTI/monochat.py +3 -3
  27. webscout/Provider/TextPollinationsAI.py +13 -12
  28. webscout/Provider/__init__.py +4 -4
  29. webscout/Provider/copilot.py +58 -61
  30. webscout/Provider/freeaichat.py +64 -55
  31. webscout/Provider/monochat.py +275 -0
  32. webscout/Provider/scira_chat.py +111 -21
  33. webscout/Provider/typegpt.py +2 -2
  34. webscout/Provider/x0gpt.py +325 -315
  35. webscout/__init__.py +7 -2
  36. webscout/auth/routes.py +20 -3
  37. webscout/version.py +1 -1
  38. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/METADATA +1 -2
  39. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/RECORD +43 -43
  40. webscout/Provider/AI21.py +0 -177
  41. webscout/Provider/HuggingFaceChat.py +0 -469
  42. webscout/Provider/OPENAI/freeaichat.py +0 -363
  43. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
  44. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
  45. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
  46. {webscout-8.3.3.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
@@ -321,36 +321,88 @@ class SciraChat(OpenAICompatibleProvider):
321
321
  messages=[{"role": "user", "content": "Hello!"}]
322
322
  )
323
323
  """
324
- # List of model display names for registration (aliases)
325
- AVAILABLE_MODELS = [
326
- "Grok3-mini (thinking)",
327
- "Grok3",
328
- "Claude 4 Sonnet",
329
- "Claude 4 Sonnet Thinking",
330
- "Grok2-Vision (vision)",
331
- "GPT4o",
332
- "QWQ-32B",
333
- "o4-mini",
334
- "Gemini 2.5 Flash Thinking",
335
- "Gemini 2.5 Pro",
336
- "Llama 4 Maverick",
337
- ]
338
- # Mapping from display name to internal model key
339
- MODEL_NAME_MAP = {
340
- "Grok3-mini (thinking)": "scira-default",
341
- "Grok3": "scira-grok-3",
342
- "Claude 4 Sonnet": "scira-anthropic",
343
- "Claude 4 Sonnet Thinking": "scira-anthropic-thinking",
344
- "Grok2-Vision (vision)": "scira-vision",
345
- "GPT4o": "scira-4o",
346
- "QWQ-32B": "scira-qwq",
347
- "o4-mini": "scira-o4-mini",
348
- "Gemini 2.5 Flash Thinking": "scira-google",
349
- "Gemini 2.5 Pro": "scira-google-pro",
350
- "Llama 4 Maverick": "scira-llama-4",
324
+ # Model mapping: actual model names to Scira API format
325
+ MODEL_MAPPING = {
326
+ "grok-3-mini": "scira-default",
327
+ "grok-3-mini-fast": "scira-x-fast-mini",
328
+ "grok-3-fast": "scira-x-fast",
329
+ "gpt-4.1-nano": "scira-nano",
330
+ "grok-3": "scira-grok-3",
331
+ "grok-4": "scira-grok-4",
332
+ "grok-2-vision-1212": "scira-vision",
333
+ "grok-2-latest": "scira-g2",
334
+ "gpt-4o-mini": "scira-4o-mini",
335
+ "o4-mini-2025-04-16": "scira-o4-mini",
336
+ "o3": "scira-o3",
337
+ "qwen/qwen3-32b": "scira-qwen-32b",
338
+ "qwen3-30b-a3b": "scira-qwen-30b",
339
+ "deepseek-v3-0324": "scira-deepseek-v3",
340
+ "claude-3-5-haiku-20241022": "scira-haiku",
341
+ "mistral-small-latest": "scira-mistral",
342
+ "gemini-2.5-flash-lite-preview-06-17": "scira-google-lite",
343
+ "gemini-2.5-flash": "scira-google",
344
+ "gemini-2.5-pro": "scira-google-pro",
345
+ "claude-sonnet-4-20250514": "scira-anthropic",
346
+ "claude-sonnet-4-20250514-thinking": "scira-anthropic-thinking",
347
+ "claude-4-opus-20250514": "scira-opus",
348
+ "claude-4-opus-20250514-pro": "scira-opus-pro",
349
+ "meta-llama/llama-4-maverick-17b-128e-instruct": "scira-llama-4",
351
350
  }
351
+ # Reverse mapping: Scira format to actual model names
352
+ SCIRA_TO_MODEL = {v: k for k, v in MODEL_MAPPING.items()}
353
+ # Add special cases for aliases and duplicate mappings
354
+ SCIRA_TO_MODEL["scira-anthropic-thinking"] = "claude-sonnet-4-20250514"
355
+ SCIRA_TO_MODEL["scira-opus-pro"] = "claude-4-opus-20250514"
356
+ SCIRA_TO_MODEL["scira-x-fast"] = "grok-3-fast"
357
+ SCIRA_TO_MODEL["scira-x-fast-mini"] = "grok-3-mini-fast"
358
+ SCIRA_TO_MODEL["scira-nano"] = "gpt-4.1-nano"
359
+ SCIRA_TO_MODEL["scira-qwen-32b"] = "qwen/qwen3-32b"
360
+ SCIRA_TO_MODEL["scira-qwen-30b"] = "qwen3-30b-a3b"
361
+ SCIRA_TO_MODEL["scira-deepseek-v3"] = "deepseek-v3-0324"
362
+ SCIRA_TO_MODEL["scira-grok-4"] = "grok-4"
363
+ MODEL_MAPPING["claude-4-opus-20250514-pro"] = "scira-opus-pro"
364
+ # Available models list (actual model names + scira aliases)
365
+ AVAILABLE_MODELS = list(MODEL_MAPPING.keys()) + list(SCIRA_TO_MODEL.keys())
352
366
  # Optional: pretty display names for UI (reverse mapping)
353
- MODEL_DISPLAY_NAMES = {v: k for k, v in MODEL_NAME_MAP.items()}
367
+ MODEL_DISPLAY_NAMES = {v: k for k, v in MODEL_MAPPING.items()}
368
+
369
+ @classmethod
370
+ def _resolve_model(cls, model: str) -> str:
371
+ """
372
+ Resolve a model name to its Scira API format.
373
+
374
+ Args:
375
+ model: Either an actual model name or a Scira alias
376
+
377
+ Returns:
378
+ The Scira API format model name
379
+
380
+ Raises:
381
+ ValueError: If the model is not supported
382
+ """
383
+ # If it's already a Scira format, return as-is
384
+ if model in cls.SCIRA_TO_MODEL:
385
+ return model
386
+ # If it's an actual model name, convert to Scira format
387
+ if model in cls.MODEL_MAPPING:
388
+ return cls.MODEL_MAPPING[model]
389
+ # Model not found
390
+ raise ValueError(f"Invalid model: {model}. Choose from: {cls.AVAILABLE_MODELS}")
391
+
392
+ def convert_model_name(self, model: str) -> str:
393
+ """
394
+ Convert model display names or internal keys to ones supported by SciraChat.
395
+ Args:
396
+ model: Model name or alias to convert
397
+ Returns:
398
+ SciraChat model name
399
+ """
400
+ # Use the new _resolve_model logic
401
+ try:
402
+ return self._resolve_model(model)
403
+ except Exception as e:
404
+ print(f"Warning: {e} Using 'scira-default' instead.")
405
+ return "scira-default"
354
406
 
355
407
  def __init__(
356
408
  self,
@@ -447,25 +499,6 @@ class SciraChat(OpenAICompatibleProvider):
447
499
  print(f"Warning: Error formatting text: {e}")
448
500
  return text
449
501
 
450
- def convert_model_name(self, model: str) -> str:
451
- """
452
- Convert model display names or internal keys to ones supported by SciraChat.
453
-
454
- Args:
455
- model: Model name or alias to convert
456
-
457
- Returns:
458
- SciraChat model name
459
- """
460
- # If model is a display name (alias), map to internal key
461
- if model in self.MODEL_NAME_MAP:
462
- return self.MODEL_NAME_MAP[model]
463
- # If model is already an internal key, return it if valid
464
- if model in self.MODEL_DISPLAY_NAMES:
465
- return model
466
- # Default to scira-default if model not found
467
- print(f"Warning: Unknown model '{model}'. Using 'scira-default' instead.")
468
- return "scira-default"
469
502
 
470
503
  @property
471
504
  def models(self):
@@ -478,9 +511,9 @@ class SciraChat(OpenAICompatibleProvider):
478
511
  if __name__ == "__main__":
479
512
  ai = SciraChat()
480
513
  response = ai.chat.completions.create(
481
- model="Gemini 2.5 Pro",
514
+ model="grok-3-mini-fast-latest",
482
515
  messages=[
483
- {"role": "user", "content": "who is pm of india?"}
516
+ {"role": "user", "content": "who are u?"}
484
517
  ],
485
518
  stream=True
486
519
  )
@@ -279,25 +279,26 @@ class TextPollinations(OpenAICompatibleProvider):
279
279
  "openai",
280
280
  "openai-fast",
281
281
  "openai-large",
282
+ "openai-reasoning",
282
283
  "openai-roblox",
283
- "qwen-coder",
284
- "llama",
284
+ "openai-audio",
285
+ "deepseek",
286
+ "deepseek-reasoning",
287
+ "grok",
285
288
  "llamascout",
286
289
  "mistral",
287
- "unity",
288
- "mirexa",
289
- "midijourney",
290
- "rtist",
290
+ "phi",
291
+ "qwen-coder",
291
292
  "searchgpt",
293
+ "bidara",
294
+ "elixposearch",
292
295
  "evil",
293
- "deepseek-reasoning",
294
- "phi",
295
- "hormoz",
296
296
  "hypnosis-tracy",
297
- "deepseek",
297
+ "midijourney",
298
+ "mirexa",
299
+ "rtist",
298
300
  "sur",
299
- "bidara",
300
- "openai-audio",
301
+ "unity",
301
302
  ]
302
303
 
303
304
  def __init__(
@@ -288,10 +288,10 @@ class TypeGPT(OpenAICompatibleProvider):
288
288
  # Working Models (based on testing)
289
289
  "gpt-4o-mini",
290
290
  "chatgpt-4o-latest",
291
- "deepseek-r1",
291
+ # "deepseek-r1",
292
292
  "deepseek-v3",
293
- "uncensored-r1",
294
- "Image-Generator",
293
+ # "uncensored-r1",
294
+ # "Image-Generator",
295
295
  ]
296
296
 
297
297
  def __init__(
@@ -0,0 +1,454 @@
1
+ from curl_cffi.requests import Session
2
+ from curl_cffi import CurlError
3
+ from typing import Any, Dict, Optional, Generator, Union
4
+ import uuid
5
+ import json
6
+
7
+ from webscout.AIutel import Optimizers
8
+ from webscout.AIutel import Conversation
9
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
10
+ from webscout.AIbase import Provider
11
+ from webscout import exceptions
12
+ from webscout.litagent import LitAgent
13
+
14
+ class QodoAI(Provider):
15
+ """
16
+ A class to interact with the Qodo AI API.
17
+ """
18
+
19
+ AVAILABLE_MODELS = [
20
+ "gpt-4.1",
21
+ "gpt-4o",
22
+ "o3",
23
+ "o4-mini",
24
+ "claude-4-sonnet",
25
+ "gemini-2.5-pro",
26
+
27
+ ]
28
+
29
+ @staticmethod
30
+ def _qodo_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
31
+ """Extracts content from Qodo stream JSON objects."""
32
+ if isinstance(chunk, dict):
33
+ data = chunk.get("data", {})
34
+ if isinstance(data, dict):
35
+ tool_args = data.get("tool_args", {})
36
+ if isinstance(tool_args, dict):
37
+ return tool_args.get("content")
38
+ return None
39
+
40
+ def __init__(
41
+ self,
42
+ api_key: str = None,
43
+ is_conversation: bool = True,
44
+ max_tokens: int = 2049,
45
+ timeout: int = 30,
46
+ intro: str = None,
47
+ filepath: str = None,
48
+ update_file: bool = True,
49
+ proxies: dict = {},
50
+ history_offset: int = 10250,
51
+ act: str = None,
52
+ model: str = "claude-4-sonnet",
53
+ browser: str = "chrome"
54
+ ):
55
+ """Initializes the Qodo AI API client."""
56
+ if model not in self.AVAILABLE_MODELS:
57
+ raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
58
+
59
+ self.url = "https://api.cli.qodo.ai/v2/agentic/start-task"
60
+ self.info_url = "https://api.cli.qodo.ai/v2/info/get-things"
61
+
62
+ # Initialize LitAgent for user agent generation
63
+ self.agent = LitAgent()
64
+ self.fingerprint = self.agent.generate_fingerprint(browser)
65
+
66
+ # Store API key
67
+ self.api_key = api_key or "sk-dS7U-extxMWUxc8SbYYOuncqGUIE8-y2OY8oMCpu0eI-qnSUyH9CYWO_eAMpqwfMo7pXU3QNrclfZYMO0M6BJTM"
68
+
69
+ # Generate session ID dynamically from API
70
+ self.session_id = self._get_session_id()
71
+ self.request_id = str(uuid.uuid4())
72
+
73
+ # Use the fingerprint for headers
74
+ self.headers = {
75
+ "Accept": "text/plain",
76
+ "Accept-Encoding": "gzip, deflate, br, zstd",
77
+ "Accept-Language": self.fingerprint["accept_language"],
78
+ "Authorization": f"Bearer {self.api_key}",
79
+ "Connection": "close",
80
+ "Content-Type": "application/json",
81
+ "host": "api.cli.qodo.ai",
82
+ "Request-id": self.request_id,
83
+ "User-Agent": self.fingerprint["user_agent"],
84
+ }
85
+
86
+ # Initialize curl_cffi Session
87
+ self.session = Session()
88
+ # Add Session-id to headers after getting it from API
89
+ self.headers["Session-id"] = self.session_id
90
+ self.session.headers.update(self.headers)
91
+ self.session.proxies.update(proxies)
92
+
93
+ self.is_conversation = is_conversation
94
+ self.max_tokens_to_sample = max_tokens
95
+ self.timeout = timeout
96
+ self.last_response = {}
97
+ self.model = model
98
+
99
+ self.__available_optimizers = (
100
+ method
101
+ for method in dir(Optimizers)
102
+ if callable(getattr(Optimizers, method)) and not method.startswith("__")
103
+ )
104
+ Conversation.intro = (
105
+ AwesomePrompts().get_act(
106
+ act, raise_not_found=True, default=None, case_insensitive=True
107
+ )
108
+ if act
109
+ else intro or Conversation.intro
110
+ )
111
+
112
+ self.conversation = Conversation(
113
+ is_conversation, self.max_tokens_to_sample, filepath, update_file
114
+ )
115
+ self.conversation.history_offset = history_offset
116
+
117
+ def refresh_identity(self, browser: str = None):
118
+ """
119
+ Refreshes the browser identity fingerprint.
120
+
121
+ Args:
122
+ browser: Specific browser to use for the new fingerprint
123
+ """
124
+ browser = browser or self.fingerprint.get("browser_type", "chrome")
125
+ self.fingerprint = self.agent.generate_fingerprint(browser)
126
+
127
+ # Update headers with new fingerprint
128
+ self.headers.update({
129
+ "Accept-Language": self.fingerprint["accept_language"],
130
+ "User-Agent": self.fingerprint["user_agent"],
131
+ })
132
+
133
+ # Update session headers
134
+ for header, value in self.headers.items():
135
+ self.session.headers[header] = value
136
+
137
+ return self.fingerprint
138
+
139
+ def _build_payload(self, prompt: str):
140
+ """Build the payload for Qodo AI API."""
141
+ return {
142
+ "agent_type": "cli",
143
+ "session_id": self.session_id,
144
+ "user_data": {
145
+ "extension_version": "0.7.2",
146
+ "os_platform": "win32",
147
+ "os_version": "v23.9.0",
148
+ "editor_type": "cli"
149
+ },
150
+ "tools": {
151
+ "web_search": [
152
+ {
153
+ "name": "web_search",
154
+ "description": "Searches the web and returns results based on the user's query (Powered by Nimble).",
155
+ "inputSchema": {
156
+ "type": "object",
157
+ "properties": {
158
+ "llm_description": {
159
+ "default": "Searches the web and returns results based on the user's query.",
160
+ "title": "Llm Description",
161
+ "type": "string"
162
+ },
163
+ "query": {
164
+ "description": "The search query to execute",
165
+ "title": "Query",
166
+ "type": "string"
167
+ }
168
+ },
169
+ "required": ["query"],
170
+ "title": "NimbleWebSearch"
171
+ },
172
+ "be_tool": True,
173
+ "autoApproved": True
174
+ },
175
+ {
176
+ "name": "web_fetch",
177
+ "description": "Fetches content from a given URL (Powered by Nimble).",
178
+ "inputSchema": {
179
+ "type": "object",
180
+ "properties": {
181
+ "llm_description": {
182
+ "default": "Fetches content from a given URL.",
183
+ "title": "Llm Description",
184
+ "type": "string"
185
+ },
186
+ "url": {
187
+ "description": "The URL to fetch content from",
188
+ "title": "Url",
189
+ "type": "string"
190
+ }
191
+ },
192
+ "required": ["url"],
193
+ "title": "NimbleWebFetch"
194
+ },
195
+ "be_tool": True,
196
+ "autoApproved": True
197
+ }
198
+ ]
199
+ },
200
+ # "projects_root_path": ["C:\\Users\\koula"],
201
+ # "cwd": "C:\\Users\\koula",
202
+ "user_request": prompt,
203
+ "execution_strategy": "act",
204
+ "custom_model": self.model,
205
+ "stream": True
206
+ }
207
+
208
+ def ask(
209
+ self,
210
+ prompt: str,
211
+ stream: bool = False,
212
+ raw: bool = False,
213
+ optimizer: str = None,
214
+ conversationally: bool = False,
215
+ ) -> Union[Dict[str, Any], Generator]:
216
+ conversation_prompt = self.conversation.gen_complete_prompt(prompt)
217
+ if optimizer:
218
+ if optimizer in self.__available_optimizers:
219
+ conversation_prompt = getattr(Optimizers, optimizer)(
220
+ conversation_prompt if conversationally else prompt
221
+ )
222
+ else:
223
+ raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
224
+
225
+ payload = self._build_payload(conversation_prompt)
226
+ payload["stream"] = stream
227
+
228
+ def for_stream():
229
+ try:
230
+ response = self.session.post(
231
+ self.url,
232
+ json=payload,
233
+ stream=True,
234
+ timeout=self.timeout,
235
+ impersonate=self.fingerprint.get("browser_type", "chrome110")
236
+ )
237
+ if response.status_code == 401:
238
+ raise exceptions.FailedToGenerateResponseError(
239
+ "Invalid API key. You need to provide your own API key.\n"
240
+ "Usage: QodoAI(api_key='your_api_key_here')\n"
241
+ "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
242
+ )
243
+ elif response.status_code != 200:
244
+ raise exceptions.FailedToGenerateResponseError(f"HTTP {response.status_code}: {response.text}")
245
+
246
+ streaming_text = ""
247
+ processed_stream = sanitize_stream(
248
+ data=response.iter_content(chunk_size=None),
249
+ intro_value="",
250
+ to_json=True,
251
+ skip_markers=["[DONE]"],
252
+ content_extractor=self._qodo_extractor,
253
+ yield_raw_on_error=True,
254
+ raw=raw
255
+ )
256
+ for content_chunk in processed_stream:
257
+ if content_chunk:
258
+ yield content_chunk if raw else {"text": content_chunk}
259
+ if not raw:
260
+ streaming_text += content_chunk
261
+
262
+ self.last_response = {"text": streaming_text}
263
+ self.conversation.update_chat_history(prompt, streaming_text)
264
+ except CurlError as e:
265
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
266
+ except Exception as e:
267
+ raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")
268
+
269
+ def for_non_stream():
270
+ try:
271
+ payload["stream"] = False
272
+ response = self.session.post(
273
+ self.url,
274
+ json=payload,
275
+ timeout=self.timeout,
276
+ impersonate=self.fingerprint.get("browser_type", "chrome110")
277
+ )
278
+ if response.status_code == 401:
279
+ raise exceptions.FailedToGenerateResponseError(
280
+ "Invalid API key. You need to provide your own API key.\n"
281
+ "Usage: QodoAI(api_key='your_api_key_here')\n"
282
+ "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
283
+ )
284
+ elif response.status_code != 200:
285
+ raise exceptions.FailedToGenerateResponseError(f"HTTP {response.status_code}: {response.text}")
286
+
287
+ response_text = response.text
288
+ processed_stream = sanitize_stream(
289
+ data=response_text.splitlines(),
290
+ intro_value=None,
291
+ to_json=True,
292
+ content_extractor=self._qodo_extractor,
293
+ yield_raw_on_error=True,
294
+ raw=raw
295
+ )
296
+ full_response = ""
297
+ for content in processed_stream:
298
+ if content:
299
+ full_response += content
300
+
301
+ self.last_response = {"text": full_response}
302
+ self.conversation.update_chat_history(prompt, full_response)
303
+ return {"text": full_response} if not raw else full_response
304
+ except CurlError as e:
305
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
306
+ except Exception as e:
307
+ raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e}")
308
+
309
+ return for_stream() if stream else for_non_stream()
310
+
311
+ def chat(
312
+ self,
313
+ prompt: str,
314
+ stream: bool = False,
315
+ optimizer: str = None,
316
+ conversationally: bool = False,
317
+ raw: bool = False,
318
+ ) -> Union[str, Generator[str, None, None]]:
319
+ def for_stream():
320
+ for response in self.ask(
321
+ prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
322
+ ):
323
+ if raw:
324
+ yield response
325
+ else:
326
+ yield response.get("text", "")
327
+
328
+ def for_non_stream():
329
+ result = self.ask(
330
+ prompt, False, raw=raw, optimizer=optimizer, conversationally=conversationally
331
+ )
332
+ if raw:
333
+ return result
334
+ else:
335
+ return self.get_message(result)
336
+
337
+ return for_stream() if stream else for_non_stream()
338
+
339
+ def get_message(self, response: dict) -> str:
340
+ assert isinstance(response, dict), "Response should be of dict data-type only"
341
+ text = response.get("text", "")
342
+ return text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
343
+
344
+ def _get_session_id(self) -> str:
345
+ """Get session ID from Qodo API."""
346
+ try:
347
+ # Create temporary session for the info request
348
+ temp_session = Session()
349
+ temp_headers = {
350
+ "Accept": "text/plain",
351
+ "Accept-Encoding": "gzip, deflate, br",
352
+ "Authorization": f"Bearer {self.api_key}",
353
+ "Connection": "close",
354
+ "Content-Type": "application/json",
355
+ "host": "api.cli.qodo.ai",
356
+ "Request-id": str(uuid.uuid4()),
357
+ "User-Agent": self.fingerprint["user_agent"] if hasattr(self, 'fingerprint') else "axios/1.10.0",
358
+ }
359
+ temp_session.headers.update(temp_headers)
360
+
361
+ response = temp_session.get(
362
+ self.info_url,
363
+ timeout=self.timeout if hasattr(self, 'timeout') else 30,
364
+ impersonate="chrome110"
365
+ )
366
+
367
+ if response.status_code == 200:
368
+ data = response.json()
369
+ session_id = data.get("session-id")
370
+ if session_id:
371
+ return session_id
372
+ elif response.status_code == 401:
373
+ # API key is invalid
374
+ raise exceptions.FailedToGenerateResponseError(
375
+ "Invalid API key. You need to provide your own API key.\n"
376
+ "Usage: QodoAI(api_key='your_api_key_here')\n"
377
+ "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
378
+ )
379
+ else:
380
+ # Other HTTP errors
381
+ raise exceptions.FailedToGenerateResponseError(
382
+ f"Failed to authenticate with Qodo API (HTTP {response.status_code}). "
383
+ "You may need to provide your own API key.\n"
384
+ "Usage: QodoAI(api_key='your_api_key_here')\n"
385
+ "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
386
+ )
387
+
388
+ # Fallback to generated session ID if API call fails
389
+ return f"20250630-{str(uuid.uuid4())}"
390
+
391
+ except exceptions.FailedToGenerateResponseError:
392
+ # Re-raise our custom exceptions
393
+ raise
394
+ except Exception as e:
395
+ # For other errors, show the API key message
396
+ raise exceptions.FailedToGenerateResponseError(
397
+ f"Failed to connect to Qodo API: {e}\n"
398
+ "You may need to provide your own API key.\n"
399
+ "Usage: QodoAI(api_key='your_api_key_here')\n"
400
+ "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
401
+ )
402
+
403
+ def refresh_session(self):
404
+ """
405
+ Refreshes the session ID by calling the Qodo API.
406
+
407
+ Returns:
408
+ str: The new session ID
409
+ """
410
+ old_session_id = self.session_id
411
+ self.session_id = self._get_session_id()
412
+
413
+ # Update headers with new session ID
414
+ self.headers["Session-id"] = self.session_id
415
+ self.session.headers["Session-id"] = self.session_id
416
+
417
+ return self.session_id
418
+
419
+ def get_available_models(self) -> Dict[str, Any]:
420
+ """
421
+ Get available models and info from Qodo API.
422
+
423
+ Returns:
424
+ Dict containing models, default_model, version, and session info
425
+ """
426
+ try:
427
+ response = self.session.get(
428
+ self.info_url,
429
+ timeout=self.timeout,
430
+ impersonate=self.fingerprint.get("browser_type", "chrome110")
431
+ )
432
+
433
+ if response.status_code == 200:
434
+ return response.json()
435
+ elif response.status_code == 401:
436
+ raise exceptions.FailedToGenerateResponseError(
437
+ "Invalid API key. You need to provide your own API key.\n"
438
+ "Usage: QodoAI(api_key='your_api_key_here')\n"
439
+ "To get an API key, install Qodo CLI via: https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart"
440
+ )
441
+ else:
442
+ raise exceptions.FailedToGenerateResponseError(f"Failed to get models: HTTP {response.status_code}")
443
+
444
+ except CurlError as e:
445
+ raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
446
+ except Exception as e:
447
+ raise exceptions.FailedToGenerateResponseError(f"Failed to get models ({type(e).__name__}): {e}")
448
+
449
+
450
+ if __name__ == "__main__":
451
+ ai = QodoAI() # You will need to provide your own API key here. To obtain one, install the Qodo CLI via https://docs.qodo.ai/qodo-documentation/qodo-gen-cli/getting-started/setup-and-quickstart
452
+ response = ai.chat("write a poem about india", raw=False, stream=True)
453
+ for chunk in response:
454
+ print(chunk, end='', flush=True)