webscout 8.3.2__py3-none-any.whl → 8.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (117)
  1. webscout/AIutel.py +367 -41
  2. webscout/Bard.py +2 -22
  3. webscout/Bing_search.py +1 -2
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/scira_search.py +24 -11
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/Deepinfra.py +75 -57
  8. webscout/Provider/ExaChat.py +93 -63
  9. webscout/Provider/Flowith.py +1 -1
  10. webscout/Provider/FreeGemini.py +2 -2
  11. webscout/Provider/Gemini.py +3 -10
  12. webscout/Provider/GeminiProxy.py +31 -5
  13. webscout/Provider/HeckAI.py +85 -80
  14. webscout/Provider/Jadve.py +56 -50
  15. webscout/Provider/LambdaChat.py +39 -31
  16. webscout/Provider/MiniMax.py +207 -0
  17. webscout/Provider/Nemotron.py +41 -13
  18. webscout/Provider/Netwrck.py +39 -59
  19. webscout/Provider/OLLAMA.py +8 -9
  20. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  21. webscout/Provider/OPENAI/MiniMax.py +298 -0
  22. webscout/Provider/OPENAI/README.md +31 -30
  23. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  24. webscout/Provider/OPENAI/__init__.py +4 -2
  25. webscout/Provider/OPENAI/autoproxy.py +753 -18
  26. webscout/Provider/OPENAI/base.py +7 -76
  27. webscout/Provider/OPENAI/copilot.py +73 -26
  28. webscout/Provider/OPENAI/deepinfra.py +96 -132
  29. webscout/Provider/OPENAI/exachat.py +9 -5
  30. webscout/Provider/OPENAI/flowith.py +179 -166
  31. webscout/Provider/OPENAI/friendli.py +233 -0
  32. webscout/Provider/OPENAI/monochat.py +329 -0
  33. webscout/Provider/OPENAI/netwrck.py +4 -7
  34. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  35. webscout/Provider/OPENAI/qodo.py +630 -0
  36. webscout/Provider/OPENAI/scirachat.py +82 -49
  37. webscout/Provider/OPENAI/textpollinations.py +13 -12
  38. webscout/Provider/OPENAI/toolbaz.py +1 -0
  39. webscout/Provider/OPENAI/typegpt.py +4 -4
  40. webscout/Provider/OPENAI/utils.py +19 -42
  41. webscout/Provider/OPENAI/x0gpt.py +14 -2
  42. webscout/Provider/OpenGPT.py +54 -32
  43. webscout/Provider/PI.py +58 -84
  44. webscout/Provider/Qodo.py +454 -0
  45. webscout/Provider/StandardInput.py +32 -13
  46. webscout/Provider/TTI/README.md +9 -9
  47. webscout/Provider/TTI/__init__.py +2 -1
  48. webscout/Provider/TTI/aiarta.py +92 -78
  49. webscout/Provider/TTI/infip.py +212 -0
  50. webscout/Provider/TTI/monochat.py +220 -0
  51. webscout/Provider/TeachAnything.py +11 -3
  52. webscout/Provider/TextPollinationsAI.py +91 -82
  53. webscout/Provider/TogetherAI.py +32 -48
  54. webscout/Provider/Venice.py +37 -46
  55. webscout/Provider/VercelAI.py +27 -24
  56. webscout/Provider/WiseCat.py +35 -35
  57. webscout/Provider/WrDoChat.py +22 -26
  58. webscout/Provider/WritingMate.py +26 -22
  59. webscout/Provider/__init__.py +6 -6
  60. webscout/Provider/copilot.py +58 -61
  61. webscout/Provider/freeaichat.py +64 -55
  62. webscout/Provider/granite.py +48 -57
  63. webscout/Provider/koala.py +51 -39
  64. webscout/Provider/learnfastai.py +49 -64
  65. webscout/Provider/llmchat.py +79 -93
  66. webscout/Provider/llmchatco.py +63 -78
  67. webscout/Provider/monochat.py +275 -0
  68. webscout/Provider/multichat.py +51 -40
  69. webscout/Provider/oivscode.py +1 -1
  70. webscout/Provider/scira_chat.py +257 -104
  71. webscout/Provider/scnet.py +13 -13
  72. webscout/Provider/searchchat.py +13 -13
  73. webscout/Provider/sonus.py +12 -11
  74. webscout/Provider/toolbaz.py +25 -8
  75. webscout/Provider/turboseek.py +41 -42
  76. webscout/Provider/typefully.py +27 -12
  77. webscout/Provider/typegpt.py +43 -48
  78. webscout/Provider/uncovr.py +55 -90
  79. webscout/Provider/x0gpt.py +325 -299
  80. webscout/Provider/yep.py +79 -96
  81. webscout/__init__.py +7 -2
  82. webscout/auth/__init__.py +12 -1
  83. webscout/auth/providers.py +27 -5
  84. webscout/auth/routes.py +146 -105
  85. webscout/auth/server.py +367 -312
  86. webscout/client.py +121 -116
  87. webscout/litagent/Readme.md +68 -55
  88. webscout/litagent/agent.py +99 -9
  89. webscout/version.py +1 -1
  90. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/METADATA +102 -91
  91. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/RECORD +95 -107
  92. webscout/Provider/AI21.py +0 -177
  93. webscout/Provider/HuggingFaceChat.py +0 -469
  94. webscout/Provider/OPENAI/freeaichat.py +0 -363
  95. webscout/Provider/TTI/fastflux.py +0 -233
  96. webscout/Provider/Writecream.py +0 -246
  97. webscout/auth/static/favicon.svg +0 -11
  98. webscout/auth/swagger_ui.py +0 -203
  99. webscout/auth/templates/components/authentication.html +0 -237
  100. webscout/auth/templates/components/base.html +0 -103
  101. webscout/auth/templates/components/endpoints.html +0 -750
  102. webscout/auth/templates/components/examples.html +0 -491
  103. webscout/auth/templates/components/footer.html +0 -75
  104. webscout/auth/templates/components/header.html +0 -27
  105. webscout/auth/templates/components/models.html +0 -286
  106. webscout/auth/templates/components/navigation.html +0 -70
  107. webscout/auth/templates/static/api.js +0 -455
  108. webscout/auth/templates/static/icons.js +0 -168
  109. webscout/auth/templates/static/main.js +0 -784
  110. webscout/auth/templates/static/particles.js +0 -201
  111. webscout/auth/templates/static/styles.css +0 -3353
  112. webscout/auth/templates/static/ui.js +0 -374
  113. webscout/auth/templates/swagger_ui.html +0 -170
  114. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
  115. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
  116. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
  117. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
@@ -43,17 +43,30 @@ class Scira(AISearch):
     """
 
     AVAILABLE_MODELS = {
-        "scira-default": "Grok3-mini", # thinking model
-        "scira-grok-3": "Grok3",
-        "scira-anthropic": "Claude 4 Sonnet",
-        "scira-anthropic-thinking": "Claude 4 Sonnet Thinking", # thinking model
-        "scira-vision" : "Grok2-Vision", # vision model
-        "scira-4o": "GPT4o",
-        "scira-qwq": "QWQ-32B",
-        "scira-o4-mini": "o4-mini",
-        "scira-google": "gemini 2.5 flash Thinking", # thinking model
-        "scira-google-pro": "gemini 2.5 pro",
-        "scira-llama-4": "llama 4 Maverick",
+        "scira-default": "grok-3-mini", # thinking model
+        "scira-x-fast-mini": "grok-3-mini-fast",
+        "scira-x-fast": "grok-3-fast",
+        "scira-nano": "gpt-4.1-nano",
+        "scira-grok-3": "grok-3",
+        "scira-grok-4": "grok-4",
+        "scira-vision": "grok-2-vision-1212",
+        "scira-g2": "grok-2-latest",
+        "scira-4o-mini": "gpt-4o-mini",
+        "scira-o4-mini": "o4-mini-2025-04-16",
+        "scira-o3": "o3",
+        "scira-qwen-32b": "qwen/qwen3-32b",
+        "scira-qwen-30b": "qwen3-30b-a3b",
+        "scira-deepseek-v3": "deepseek-v3-0324",
+        "scira-haiku": "claude-3-5-haiku-20241022",
+        "scira-mistral": "mistral-small-latest",
+        "scira-google-lite": "gemini-2.5-flash-lite-preview-06-17",
+        "scira-google": "gemini-2.5-flash",
+        "scira-google-pro": "gemini-2.5-pro",
+        "scira-anthropic": "claude-sonnet-4-20250514",
+        "scira-anthropic-thinking": "claude-sonnet-4-20250514",
+        "scira-opus": "claude-4-opus-20250514",
+        "scira-opus-pro": "claude-4-opus-20250514",
+        "scira-llama-4": "meta-llama/llama-4-maverick-17b-128e-instruct",
     }
     def __init__(
         self,
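
The rewritten map swaps friendly display names for upstream model IDs, so the `scira-*` keys are what callers pass in. A minimal usage sketch (not part of the diff; it assumes Scira still accepts the `model` keyword it exposed in 8.3.2):

    from webscout.Provider.AISEARCH.scira_search import Scira

    ai = Scira(model="scira-grok-4")  # assumed kwarg; keys come from AVAILABLE_MODELS above
    for chunk in ai.search("What is Python?", stream=True):
        print(chunk, end="", flush=True)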
@@ -0,0 +1,132 @@
+import requests
+import re
+from typing import Dict, Optional, Generator, Union, Any
+from webscout.AIbase import AISearch, SearchResponse
+from webscout import exceptions
+from webscout.litagent import LitAgent
+from webscout.AIutel import sanitize_stream
+
+class Stellar(AISearch):
+    """AI Search provider for stellar.chatastra.ai"""
+    def __init__(self, timeout: int = 30, proxies: Optional[dict] = None):
+        self.api_endpoint = "https://stellar.chatastra.ai/search/x1GUVzl"
+        self.timeout = timeout
+        self.proxies = proxies
+        self.session = requests.Session()
+        self.headers = {
+            "accept": "text/x-component",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "multipart/form-data; boundary=----WebKitFormBoundaryQsWD5Qs3QqDkNBPH",
+            "dnt": "1",
+            "next-action": "efc2643ed9bafe182a010b58ebea17f068ad3985",
+            "next-router-state-tree": "%5B%22%22%2C%7B%22children%22%3A%5B%22__PAGE__%22%2C%7B%7D%2C%22%2F%22%2C%22refresh%22%5D%7D%2Cnull%2Cnull%2Ctrue%5D",
+            "origin": "https://stellar.chatastra.ai",
+            "priority": "u=1, i",
+            "referer": "https://stellar.chatastra.ai/search/x1GUVzl",
+            "sec-ch-ua": '"Microsoft Edge";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "sec-gpc": "1",
+            "user-agent": LitAgent().random(),
+            "cookie": "__client_uat=0; __client_uat_K90aduOv=0",
+        }
+        self.session.headers.update(self.headers)
+        if proxies:
+            self.session.proxies = proxies
+
+    def _make_payload(self, prompt: str) -> bytes:  # This is a static payload for the demo; in production, generate dynamically as needed
+        boundary = "----WebKitFormBoundaryQsWD5Qs3QqDkNBPH"
+        parts = [
+            f"--{boundary}\r\nContent-Disposition: form-data; name=\"1\"\r\n\r\n{{\"id\":\"71bb616ba5b7cbcac2308fe0c249a9f2d51825b7\",\"bound\":null}}\r\n",
+            f"--{boundary}\r\nContent-Disposition: form-data; name=\"2\"\r\n\r\n{{\"id\":\"8bcca1d0cb933b14fefde88dacb2865be3d1d525\",\"bound\":null}}\r\n",
+            f"--{boundary}\r\nContent-Disposition: form-data; name=\"3_input\"\r\n\r\n{prompt}\r\n",
+            f"--{boundary}\r\nContent-Disposition: form-data; name=\"3_id\"\r\n\r\nx1GUVzl\r\n",
+            f"--{boundary}\r\nContent-Disposition: form-data; name=\"3_userId\"\r\n\r\nnull\r\n",
+            f"--{boundary}\r\nContent-Disposition: form-data; name=\"0\"\r\n\r\n[{{\"action\":\"$F1\",\"options\":{{\"onSetAIState\":\"$F2\"}}}},{{\"messages\":[],\"chatId\":\"\"}},\"$K3\"]\r\n",
+            f"--{boundary}--\r\n"
+        ]
+        return "".join(parts).encode("utf-8")
+
+    @staticmethod
+    def _stellar_extractor(chunk: Union[str, bytes, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the Stellar stream format with hex keys and diff arrays. Handles both str and bytes input."""
+        if isinstance(chunk, bytes):
+            try:
+                chunk = chunk.decode('utf-8', errors='replace')
+            except Exception:
+                return None
+        if not isinstance(chunk, str):
+            return None
+        # Match patterns like 6e:{"diff":[0," empathy"],"next":"$@6f"}
+        pattern = r'[0-9a-f]+:\{"diff":\[0,"([^"\\]*)"\]'
+        matches = re.findall(pattern, chunk)
+        if matches:
+            extracted_text = ''.join(matches)
+            # Fix escaped newlines
+            extracted_text = extracted_text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
+            return extracted_text if extracted_text.strip() else None
+        return None
+
+    def search(self, prompt: str, stream: bool = False, raw: bool = False) -> Union[SearchResponse, Generator[Union[Dict[str, str], SearchResponse, str], None, None]]:
+        payload = self._make_payload(prompt)
+        try:
+            response = self.session.post(
+                self.api_endpoint,
+                data=payload,
+                timeout=self.timeout,
+                proxies=self.proxies,
+                stream=stream,
+            )
+            if not response.ok:
+                raise exceptions.APIConnectionError(f"Failed to get response: {response.status_code} {response.text}")
+
+            def _yield_stream():
+                # Use sanitize_stream for real-time extraction from the response iterator
+                processed_stream = sanitize_stream(
+                    data=response.iter_lines(decode_unicode=True),
+                    intro_value=None,
+                    to_json=False,
+                    content_extractor=self._stellar_extractor
+                )
+                full_response = ""
+                for content in processed_stream:
+                    if content and isinstance(content, str):
+                        full_response += content
+                        if raw:
+                            yield {"text": content}
+                        else:
+                            yield content
+                # Do NOT yield SearchResponse(full_response) in streaming mode to avoid duplicate output
+
+            if stream:
+                return _yield_stream()
+            else:
+                # Use sanitize_stream for the full response text
+                processed_stream = sanitize_stream(
+                    data=response.text.splitlines(),
+                    intro_value=None,
+                    to_json=False,
+                    content_extractor=self._stellar_extractor
+                )
+                full_response = ""
+                for content in processed_stream:
+                    if content and isinstance(content, str):
+                        full_response += content
+                if raw:
+                    return {"text": full_response}
+                else:
+                    return SearchResponse(full_response)
+        except requests.RequestException as e:
+            raise exceptions.APIConnectionError(f"Request failed: {e}")
+
+if __name__ == "__main__":
+    from rich import print
+    ai = Stellar()
+    user_query = input(">>> ")
+    response = ai.search(user_query, stream=True, raw=False)
+    for chunk in response:
+        print(chunk, end="", flush=True)
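
The extractor is the interesting part of the new file: Stellar streams Next.js server-action frames, and `_stellar_extractor` pulls text out of `diff` arrays keyed by hex IDs. A quick illustration (not from the diff) feeding it the sample frame named in the code comment above; `_stellar_extractor` is a staticmethod, so it can be called without an instance:

    from webscout.Provider.AISEARCH.stellar_search import Stellar

    frame = '6e:{"diff":[0," empathy"],"next":"$@6f"}'
    print(Stellar._stellar_extractor(frame))  # -> " empathy"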
@@ -17,62 +17,66 @@ class DeepInfra(Provider):
     """
 
     AVAILABLE_MODELS = [
-        # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
-        "deepseek-ai/DeepSeek-R1-0528",
-        "deepseek-ai/DeepSeek-R1",
-        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
-        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
-        "deepseek-ai/DeepSeek-R1-Turbo",
-        "deepseek-ai/DeepSeek-V3",
+        "anthropic/claude-4-opus",
+        "anthropic/claude-4-sonnet",
+        "deepseek-ai/DeepSeek-R1-0528-Turbo",
+        "Qwen/Qwen3-235B-A22B",
+        "Qwen/Qwen3-30B-A3B",
+        "Qwen/Qwen3-32B",
+        "Qwen/Qwen3-14B",
+        "deepseek-ai/DeepSeek-V3-0324-Turbo",
         "deepseek-ai/DeepSeek-Prover-V2-671B",
-        "google/gemma-2-27b-it",
-        "google/gemma-2-9b-it",
-        "google/gemma-3-12b-it",
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-Turbo",
+        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+        "deepseek-ai/DeepSeek-R1-0528",
+        "deepseek-ai/DeepSeek-V3-0324",
+        "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
+        "microsoft/phi-4-reasoning-plus",
+        "Qwen/QwQ-32B",
+        "google/gemini-2.5-flash",
+        "google/gemini-2.5-pro",
         "google/gemma-3-27b-it",
+        "google/gemma-3-12b-it",
         "google/gemma-3-4b-it",
-        "meta-llama/Llama-3.3-70B-Instruct",
+        "microsoft/Phi-4-multimodal-instruct",
+        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+        "deepseek-ai/DeepSeek-V3",
         "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
-        "meta-llama/Llama-Guard-4-12B",
+        "meta-llama/Llama-3.3-70B-Instruct",
+        "microsoft/phi-4",
+        "Gryphe/MythoMax-L2-13b",
+        "NousResearch/Hermes-3-Llama-3.1-405B",
+        "NousResearch/Hermes-3-Llama-3.1-70B",
+        "NovaSky-AI/Sky-T1-32B-Preview",
+        "Qwen/Qwen2.5-72B-Instruct",
+        "Qwen/Qwen2.5-7B-Instruct",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Sao10K/L3-8B-Lunaris-v1-Turbo",
+        "Sao10K/L3.1-70B-Euryale-v2.2",
+        "Sao10K/L3.3-70B-Euryale-v2.3",
+        "anthropic/claude-3-7-sonnet-latest",
+        "deepseek-ai/DeepSeek-R1",
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+        "deepseek-ai/DeepSeek-R1-Turbo",
+        "google/gemini-2.0-flash-001",
+        "meta-llama/Llama-3.2-11B-Vision-Instruct",
+        "meta-llama/Llama-3.2-1B-Instruct",
+        "meta-llama/Llama-3.2-3B-Instruct",
+        "meta-llama/Llama-3.2-90B-Vision-Instruct",
+        "meta-llama/Meta-Llama-3-70B-Instruct",
+        "meta-llama/Meta-Llama-3-8B-Instruct",
+        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
         "meta-llama/Meta-Llama-3.1-8B-Instruct",
         "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-        "microsoft/Phi-4-multimodal-instruct",
         "microsoft/WizardLM-2-8x22B",
-        "microsoft/phi-4",
-        "microsoft/phi-4-reasoning-plus",
+        "mistralai/Devstral-Small-2505",
+        "mistralai/Mistral-7B-Instruct-v0.3",
+        "mistralai/Mistral-Nemo-Instruct-2407",
         "mistralai/Mistral-Small-24B-Instruct-2501",
+        "mistralai/Mistral-Small-3.2-24B-Instruct-2506",
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct",
-        "Qwen/QwQ-32B",
-        "Qwen/Qwen2.5-72B-Instruct",
-        "Qwen/Qwen2.5-Coder-32B-Instruct",
-        "Qwen/Qwen3-14B",
-        "Qwen/Qwen3-30B-A3B",
-        "Qwen/Qwen3-32B",
-        "Qwen/Qwen3-235B-A22B",
-        # "google/gemini-1.5-flash", # >>>> NOT WORKING
-        # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
-        # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
-
-        # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
-
-        # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-90B-Vision-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-11B-Vision-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
-        # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
-        # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
-        # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
-        # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
-        # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
-        # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
-        # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
-        # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
     ]
 
     @staticmethod
@@ -84,6 +88,7 @@ class DeepInfra(Provider):
 
     def __init__(
         self,
+        api_key: Optional[str] = None,
         is_conversation: bool = True,
         max_tokens: int = 2049,
         timeout: int = 30,
@@ -107,21 +112,34 @@ class DeepInfra(Provider):
         self.agent = LitAgent()
         # Fingerprint generation might be less relevant with impersonate
         self.fingerprint = self.agent.generate_fingerprint(browser)
-
+        self.api = api_key
         # Use the fingerprint for headers (keep relevant ones)
         self.headers = {
-            "Accept": self.fingerprint["accept"], # Keep Accept
-            "Accept-Language": self.fingerprint["accept_language"], # Keep Accept-Language
+            "Accept": self.fingerprint["accept"],
+            "Accept-Language": self.fingerprint["accept_language"],
             "Content-Type": "application/json",
-            "Cache-Control": "no-cache", # Keep Cache-Control
-            "Origin": "https://deepinfra.com", # Keep Origin
-            "Pragma": "no-cache", # Keep Pragma
-            "Referer": "https://deepinfra.com/", # Keep Referer
-            "Sec-Fetch-Dest": "empty", # Keep Sec-Fetch-*
+            "Cache-Control": "no-cache",
+            "Origin": "https://deepinfra.com",
+            "Pragma": "no-cache",
+            "Referer": "https://deepinfra.com/",
+            "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-site",
-            "X-Deepinfra-Source": "web-embed", # Keep custom headers
+            "X-Deepinfra-Source": "web-embed",
+            # Additional headers from LitAgent.generate_fingerprint
+            "User-Agent": self.fingerprint.get("user_agent", ""),
+            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", ""),
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform", "")}"',
+            "X-Forwarded-For": self.fingerprint.get("x-forwarded-for", ""),
+            "X-Real-IP": self.fingerprint.get("x-real-ip", ""),
+            "X-Client-IP": self.fingerprint.get("x-client-ip", ""),
+            "Forwarded": self.fingerprint.get("forwarded", ""),
+            "X-Forwarded-Proto": self.fingerprint.get("x-forwarded-proto", ""),
+            "X-Request-Id": self.fingerprint.get("x-request-id", ""),
         }
+        if self.api is not None:
+            self.headers["Authorization"] = f"Bearer {self.api}"
 
         # Initialize curl_cffi Session
         self.session = Session()
@@ -321,7 +339,7 @@ if __name__ == "__main__":
 
     for model in DeepInfra.AVAILABLE_MODELS:
         try:
-            test_ai = DeepInfra(model=model, timeout=60)
+            test_ai = DeepInfra(model=model, timeout=60, api_key="jwt:eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJnaDoxNTg5ODg0NzgiLCJleHAiOjE3NTI3NDI5NDV9.qM93p6bPZYi_ejaOo1Dbe4UjYXrFiM7XvBLN4-9BWag")
             response = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
             for chunk in response:
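
Beyond the model-list reshuffle, DeepInfra now takes an optional `api_key` and, when one is supplied, sends it as a Bearer token (see the `Authorization` logic above). A minimal sketch of the new keyword, assuming the other 8.3.2 constructor arguments are unchanged; the token value is a placeholder:

    from webscout.Provider.Deepinfra import DeepInfra

    ai = DeepInfra(model="Qwen/Qwen3-32B", api_key="YOUR_DEEPINFRA_TOKEN")  # placeholder token, sent as "Authorization: Bearer ..."
    print(ai.chat("Say 'Hello' in one word"))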
@@ -2,7 +2,7 @@ from curl_cffi import CurlError
 from curl_cffi.requests import Session, Response # Import Response
 import json
 import uuid
-from typing import Any, Dict, Union, Optional, List
+from typing import Any, Dict, Union, Optional, List, Generator
 from datetime import datetime
 from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
@@ -21,9 +21,9 @@ MODEL_CONFIGS = {
             "gemini-2.0-flash",
             "gemini-2.0-flash-exp-image-generation",
             "gemini-2.0-flash-thinking-exp-01-21",
-            "gemini-2.5-pro-exp-03-25",
+            "gemini-2.5-flash-lite-preview-06-17",
             "gemini-2.0-pro-exp-02-05",
-            "gemini-2.5-flash-preview-04-17",
+            "gemini-2.5-flash",
 
 
         ],
@@ -62,7 +62,9 @@ MODEL_CONFIGS = {
         "endpoint": "https://ayle.chat/api/cerebras",
         "models": [
             "llama3.1-8b",
-            "llama-3.3-70b"
+            "llama-3.3-70b",
+            "llama-4-scout-17b-16e-instruct",
+            "qwen-3-32b"
         ],
     },
     "xai": {
@@ -88,9 +90,9 @@ class ExaChat(Provider):
         "gemini-2.0-flash",
         "gemini-2.0-flash-exp-image-generation",
         "gemini-2.0-flash-thinking-exp-01-21",
-        "gemini-2.5-pro-exp-03-25",
         "gemini-2.0-pro-exp-02-05",
-        "gemini-2.5-flash-preview-04-17",
+        "gemini-2.5-flash",
+        "gemini-2.5-flash-lite-preview-06-17",
 
         # OpenRouter Models
         "mistralai/mistral-small-3.1-24b-instruct:free",
@@ -120,6 +122,8 @@ class ExaChat(Provider):
         # Cerebras Models
         "llama3.1-8b",
         "llama-3.3-70b",
+        "llama-4-scout-17b-16e-instruct",
+        "qwen-3-32b",
 
     ]
 
@@ -264,10 +268,11 @@ class ExaChat(Provider):
     def ask(
         self,
         prompt: str,
+        stream: bool = False,
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]:
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
         """Sends a prompt to the API and returns the response."""
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -281,78 +286,103 @@ class ExaChat(Provider):
 
         payload = self._build_payload(conversation_prompt)
         response = self._make_request(payload)
-
-        try:
+        processed_stream = sanitize_stream(
+            data=response.iter_content(chunk_size=None),
+            intro_value=None,
+            to_json=True,
+            content_extractor=self._exachat_extractor,
+            yield_raw_on_error=False,
+            raw=raw
+        )
+        if stream:
+            streaming_text = ""
+            for content_chunk in processed_stream:
+                if content_chunk and isinstance(content_chunk, str):
+                    content_chunk = content_chunk.replace('\\\\', '\\').replace('\\"', '"')
+                if raw:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                    yield content_chunk
+                else:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield dict(text=content_chunk)
+            self.last_response = {"text": streaming_text}
+            self.conversation.update_chat_history(prompt, streaming_text)
+        else:
             full_response = ""
-            # Use sanitize_stream to process the response
-            processed_stream = sanitize_stream(
-                data=response.iter_content(chunk_size=None), # Pass byte iterator
-                intro_value=None, # API doesn't seem to use 'data:' prefix
-                to_json=True, # Stream sends JSON lines
-                content_extractor=self._exachat_extractor, # Use the specific extractor
-                yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
-            )
-
             for content_chunk in processed_stream:
-                # content_chunk is the string extracted by _exachat_extractor
                 if content_chunk and isinstance(content_chunk, str):
-                    full_response += content_chunk
-
+                    content_chunk = content_chunk.replace('\\\\', '\\').replace('\\"', '"')
+                if raw:
+                    if content_chunk and isinstance(content_chunk, str):
+                        full_response += content_chunk
+                else:
+                    if content_chunk and isinstance(content_chunk, str):
+                        full_response += content_chunk
             self.last_response = {"text": full_response}
             self.conversation.update_chat_history(prompt, full_response)
-            return self.last_response if not raw else full_response # Return dict or raw string
-
-        except json.JSONDecodeError as e:
-            raise exceptions.FailedToGenerateResponseError(f"Invalid JSON response: {e}") from e
+            return self.last_response if not raw else full_response
 
     def chat(
         self,
         prompt: str,
+        stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
-        """Generate response."""
-        response = self.ask(
-            prompt, optimizer=optimizer, conversationally=conversationally
-        )
-        return self.get_message(response)
+        raw: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream():
+            for response in self.ask(
+                prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
+            ):
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
+        def for_non_stream():
+            result = self.ask(
+                prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally
+            )
+            if raw:
+                return result if isinstance(result, str) else str(result)
+            return self.get_message(result)
+        return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: Union[Dict[str, Any], str]) -> str:
-        """
-        Retrieves message from response.
-
-        Args:
-            response (Union[Dict[str, Any], str]): The response to extract the message from
-
-        Returns:
-            str: The extracted message text
-        """
         if isinstance(response, dict):
-            return response.get("text", "")
-        return str(response)
+            text = response.get("text", "")
+        else:
+            text = str(response)
+        return text.replace('\\\\', '\\').replace('\\"', '"')
 
 if __name__ == "__main__":
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
+    # print("-" * 80)
+    # print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    # print("-" * 80)
 
-    # Test all available models
-    working = 0
-    total = len(ExaChat.AVAILABLE_MODELS)
+    # # Test all available models
+    # working = 0
+    # total = len(ExaChat.AVAILABLE_MODELS)
 
-    for model in ExaChat.AVAILABLE_MODELS:
-        try:
-            test_ai = ExaChat(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word")
-            response_text = response
+    # for model in ExaChat.AVAILABLE_MODELS:
+    #     try:
+    #         test_ai = ExaChat(model=model, timeout=60)
+    #         response = test_ai.chat("Say 'Hello' in one word")
+    #         response_text = response
 
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Truncate response if too long
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"{model:<50} {'✗':<10} {str(e)}")
+    #         if response_text and len(response_text.strip()) > 0:
+    #             status = "✓"
+    #             # Truncate response if too long
+    #             display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+    #         else:
+    #             status = "✗"
+    #             display_text = "Empty or invalid response"
+    #         print(f"{model:<50} {status:<10} {display_text}")
+    #     except Exception as e:
+    #         print(f"{model:<50} {'✗':<10} {str(e)}")
+    from rich import print
+    ai = ExaChat(model="gemini-2.0-flash")
+    response = ai.chat("tell me a joke", stream=True, raw=False)
+    for chunk in response:
+        print(chunk, end='', flush=True)
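
`ExaChat.ask` and `ExaChat.chat` gain `stream` and `raw` parameters: streaming yields chunks as they arrive, while the old blocking call remains the default. A minimal sketch mirroring the new `__main__` block (module path per the file list above; constructor defaults assumed unchanged from 8.3.2):

    from webscout.Provider.ExaChat import ExaChat

    ai = ExaChat(model="gemini-2.0-flash")
    print(ai.chat("Say 'Hello' in one word"))       # non-stream: returns a str
    for piece in ai.chat("tell me a joke", stream=True):
        print(piece, end="", flush=True)            # stream: yields str chunks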
@@ -15,7 +15,7 @@ class Flowith(Provider):
     """
     A provider class for interacting with the Flowith API.
     """
-    AVAILABLE_MODELS = ["gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner", "claude-3.5-haiku", "gemini-2.0-flash", "gemini-2.5-flash", "grok-3-mini"]
+    AVAILABLE_MODELS = ["gpt-4.1-nano", "gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner", "claude-3.5-haiku", "gemini-2.0-flash", "gemini-2.5-flash", "grok-3-mini"]
 
     def __init__(
         self,
@@ -83,7 +83,7 @@ class FreeGemini(Provider):
         self.last_response = {}
         self.system_prompt = system_prompt # Stored for consistency
 
-        self.api_endpoint = "https://free-gemini.vercel.app/api/google/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse"
+        self.api_endpoint = "https://free-gemini.vercel.app/api/google/v1beta/models/gemini-2.5-flash:streamGenerateContent?alt=sse"
 
         self.agent = LitAgent()
         self.headers = {
@@ -246,5 +246,5 @@ class FreeGemini(Provider):
 if __name__ == "__main__":
     # Example usage
     free_gemini = FreeGemini()
-    response = free_gemini.chat("What is the capital of France?", stream=False)
+    response = free_gemini.chat("how many r in strawberry", stream=False)
     print(response) # Should print the response from the API
@@ -10,22 +10,15 @@ from ..Bard import Chatbot, Model
 
 warnings.simplefilter("ignore", category=UserWarning)
 
-# Define model aliases for easy usage
+# Define model aliases for easy usage (only supported models)
 MODEL_ALIASES: Dict[str, Model] = {
     "unspecified": Model.UNSPECIFIED,
-    "gemini-2.0-flash": Model.G_2_0_FLASH,
-    "gemini-2.0-flash-thinking": Model.G_2_0_FLASH_THINKING,
-    "gemini-2.5-pro": Model.G_2_5_PRO,
-    "gemini-2.0-exp-advanced": Model.G_2_0_EXP_ADVANCED,
-    "gemini-2.5-exp-advanced": Model.G_2_5_EXP_ADVANCED,
     "gemini-2.5-flash": Model.G_2_5_FLASH,
+    "gemini-2.5-pro": Model.G_2_5_PRO,
     # Add shorter aliases for convenience
-    "flash": Model.G_2_0_FLASH,
     "flash-2.5": Model.G_2_5_FLASH,
-    "thinking": Model.G_2_0_FLASH_THINKING,
     "pro": Model.G_2_5_PRO,
-    "advanced": Model.G_2_0_EXP_ADVANCED,
-    "advanced-2.5": Model.G_2_5_EXP_ADVANCED,
+    "unspecified": Model.UNSPECIFIED,
 }
 
 # List of available models (friendly names)