webscout-7.7-py3-none-any.whl → webscout-7.9-py3-none-any.whl

This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release.

Files changed (134)
  1. webscout/AIutel.py +2 -1
  2. webscout/Bard.py +12 -29
  3. webscout/DWEBS.py +477 -461
  4. webscout/Extra/__init__.py +2 -0
  5. webscout/Extra/autocoder/__init__.py +9 -9
  6. webscout/Extra/autocoder/{rawdog.py → autocoder.py} +849 -790
  7. webscout/Extra/autocoder/autocoder_utiles.py +332 -194
  8. webscout/Extra/gguf.py +682 -682
  9. webscout/Extra/tempmail/__init__.py +26 -0
  10. webscout/Extra/tempmail/async_utils.py +141 -0
  11. webscout/Extra/tempmail/base.py +156 -0
  12. webscout/Extra/tempmail/cli.py +187 -0
  13. webscout/Extra/tempmail/mail_tm.py +361 -0
  14. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  15. webscout/Provider/AI21.py +1 -1
  16. webscout/Provider/AISEARCH/DeepFind.py +2 -2
  17. webscout/Provider/AISEARCH/ISou.py +2 -2
  18. webscout/Provider/AISEARCH/felo_search.py +6 -6
  19. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  20. webscout/Provider/Aitopia.py +292 -0
  21. webscout/Provider/AllenAI.py +1 -1
  22. webscout/Provider/Andi.py +3 -3
  23. webscout/Provider/C4ai.py +1 -1
  24. webscout/Provider/ChatGPTES.py +3 -5
  25. webscout/Provider/ChatGPTGratis.py +4 -4
  26. webscout/Provider/Chatify.py +2 -2
  27. webscout/Provider/Cloudflare.py +3 -2
  28. webscout/Provider/DeepSeek.py +2 -2
  29. webscout/Provider/Deepinfra.py +288 -286
  30. webscout/Provider/ElectronHub.py +709 -634
  31. webscout/Provider/ExaChat.py +325 -0
  32. webscout/Provider/Free2GPT.py +2 -2
  33. webscout/Provider/Gemini.py +167 -179
  34. webscout/Provider/GithubChat.py +1 -1
  35. webscout/Provider/Glider.py +4 -4
  36. webscout/Provider/Groq.py +41 -27
  37. webscout/Provider/HF_space/qwen_qwen2.py +1 -1
  38. webscout/Provider/HeckAI.py +1 -1
  39. webscout/Provider/HuggingFaceChat.py +1 -1
  40. webscout/Provider/Hunyuan.py +1 -1
  41. webscout/Provider/Jadve.py +3 -3
  42. webscout/Provider/Koboldai.py +3 -3
  43. webscout/Provider/LambdaChat.py +3 -2
  44. webscout/Provider/Llama.py +3 -5
  45. webscout/Provider/Llama3.py +4 -12
  46. webscout/Provider/Marcus.py +3 -3
  47. webscout/Provider/OLLAMA.py +8 -8
  48. webscout/Provider/Openai.py +7 -3
  49. webscout/Provider/PI.py +1 -1
  50. webscout/Provider/Perplexitylabs.py +1 -1
  51. webscout/Provider/Phind.py +1 -1
  52. webscout/Provider/PizzaGPT.py +1 -1
  53. webscout/Provider/QwenLM.py +4 -7
  54. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +3 -1
  55. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +3 -3
  56. webscout/Provider/TTI/ImgSys/__init__.py +23 -0
  57. webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
  58. webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
  59. webscout/Provider/TTI/__init__.py +3 -1
  60. webscout/Provider/TTI/artbit/async_artbit.py +1 -1
  61. webscout/Provider/TTI/artbit/sync_artbit.py +1 -1
  62. webscout/Provider/TTI/huggingface/async_huggingface.py +1 -1
  63. webscout/Provider/TTI/huggingface/sync_huggingface.py +1 -1
  64. webscout/Provider/TTI/piclumen/__init__.py +22 -22
  65. webscout/Provider/TTI/piclumen/sync_piclumen.py +232 -232
  66. webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
  67. webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
  68. webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
  69. webscout/Provider/TTI/talkai/sync_talkai.py +1 -1
  70. webscout/Provider/TTS/utils.py +1 -1
  71. webscout/Provider/TeachAnything.py +1 -1
  72. webscout/Provider/TextPollinationsAI.py +232 -230
  73. webscout/Provider/TwoAI.py +1 -2
  74. webscout/Provider/Venice.py +4 -2
  75. webscout/Provider/VercelAI.py +234 -0
  76. webscout/Provider/WebSim.py +3 -2
  77. webscout/Provider/WiseCat.py +10 -12
  78. webscout/Provider/Youchat.py +1 -1
  79. webscout/Provider/__init__.py +10 -4
  80. webscout/Provider/ai4chat.py +1 -1
  81. webscout/Provider/aimathgpt.py +2 -6
  82. webscout/Provider/akashgpt.py +1 -1
  83. webscout/Provider/askmyai.py +4 -4
  84. webscout/Provider/{DARKAI.py → asksteve.py} +56 -77
  85. webscout/Provider/bagoodex.py +2 -2
  86. webscout/Provider/cerebras.py +1 -1
  87. webscout/Provider/chatglm.py +4 -4
  88. webscout/Provider/cleeai.py +1 -0
  89. webscout/Provider/copilot.py +21 -9
  90. webscout/Provider/elmo.py +1 -1
  91. webscout/Provider/flowith.py +1 -1
  92. webscout/Provider/freeaichat.py +64 -31
  93. webscout/Provider/gaurish.py +3 -5
  94. webscout/Provider/geminiprorealtime.py +1 -1
  95. webscout/Provider/granite.py +4 -4
  96. webscout/Provider/hermes.py +5 -5
  97. webscout/Provider/julius.py +1 -1
  98. webscout/Provider/koala.py +1 -1
  99. webscout/Provider/lepton.py +1 -1
  100. webscout/Provider/llama3mitril.py +4 -4
  101. webscout/Provider/llamatutor.py +1 -1
  102. webscout/Provider/llmchat.py +3 -3
  103. webscout/Provider/meta.py +1 -1
  104. webscout/Provider/multichat.py +10 -10
  105. webscout/Provider/promptrefine.py +1 -1
  106. webscout/Provider/searchchat.py +293 -0
  107. webscout/Provider/sonus.py +2 -2
  108. webscout/Provider/talkai.py +2 -2
  109. webscout/Provider/turboseek.py +1 -1
  110. webscout/Provider/tutorai.py +1 -1
  111. webscout/Provider/typegpt.py +5 -42
  112. webscout/Provider/uncovr.py +312 -297
  113. webscout/Provider/x0gpt.py +1 -1
  114. webscout/Provider/yep.py +64 -12
  115. webscout/__init__.py +3 -1
  116. webscout/cli.py +59 -98
  117. webscout/conversation.py +350 -17
  118. webscout/litprinter/__init__.py +59 -667
  119. webscout/optimizers.py +419 -419
  120. webscout/tempid.py +11 -11
  121. webscout/update_checker.py +14 -12
  122. webscout/utils.py +2 -2
  123. webscout/version.py +1 -1
  124. webscout/webscout_search.py +146 -87
  125. webscout/webscout_search_async.py +148 -27
  126. {webscout-7.7.dist-info → webscout-7.9.dist-info}/METADATA +92 -66
  127. webscout-7.9.dist-info/RECORD +248 -0
  128. webscout/Provider/EDITEE.py +0 -192
  129. webscout/litprinter/colors.py +0 -54
  130. webscout-7.7.dist-info/RECORD +0 -234
  131. {webscout-7.7.dist-info → webscout-7.9.dist-info}/LICENSE.md +0 -0
  132. {webscout-7.7.dist-info → webscout-7.9.dist-info}/WHEEL +0 -0
  133. {webscout-7.7.dist-info → webscout-7.9.dist-info}/entry_points.txt +0 -0
  134. {webscout-7.7.dist-info → webscout-7.9.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,234 @@
+ import re
+ import time
+ import requests
+ import json
+ from typing import Union, Any, Dict, Generator, Optional
+ import uuid
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+
+ class VercelAI(Provider):
+     """
+     A class to interact with the Vercel AI API.
+     """
+
+     AVAILABLE_MODELS = [
+         "chat-model",
+         "chat-model-reasoning"
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "chat-model",
+         system_prompt: str = "You are a helpful AI assistant."
+     ):
+         """Initializes the Vercel AI API client."""
+
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://chat.vercel.ai/api/chat"
+         self.stream_chunk_size = 64
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.system_prompt = system_prompt
+         self.litagent = LitAgent()
+         self.headers = self.litagent.generate_fingerprint()
+         self.session.headers.update(self.headers)
+         self.session.proxies = proxies
+
+         # Add Vercel AI specific headers
+         self.session.headers.update({
+             "authority": "chat.vercel.ai",
+             "accept": "*/*",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "content-type": "application/json",
+             "dnt": "1",
+             "origin": "https://chat.vercel.ai",
+             "priority": "u=1, i",
+             "referer": "https://chat.vercel.ai/",
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-origin",
+             "sec-gpc": "1",
+             "x-kpsdk-c": "1-Cl4OUDwFNA",
+             "x-kpsdk-cd": json.dumps({
+                 "workTime": int(time.time() * 1000),
+                 "id": str(uuid.uuid4()),
+                 "answers": [5, 5],
+                 "duration": 26.9,
+                 "d": 1981,
+                 "st": int(time.time() * 1000) - 1000,
+                 "rst": int(time.time() * 1000) - 500
+             }),
+             "x-kpsdk-ct": str(uuid.uuid4()),
+             "x-kpsdk-r": "1-B1NfB2A",
+             "x-kpsdk-v": "j-1.0.0"
+         })
+
+         # Add cookies
+         self.session.cookies.update({
+             "KP_UIDz": str(uuid.uuid4()),
+             "KP_UIDz-ssn": str(uuid.uuid4())
+         })
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
+         """Chat with AI"""
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         payload = {
+             "id": "guest",
+             "messages": [
+                 {
+                     "id": str(uuid.uuid4()),
+                     "createdAt": "2025-03-29T09:13:16.992Z",
+                     "role": "user",
+                     "content": conversation_prompt,
+                     "parts": [{"type": "text", "text": conversation_prompt}]
+                 }
+             ],
+             "selectedChatModelId": self.model
+         }
+
+         def for_stream():
+             response = self.session.post(
+                 self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
+             )
+             if not response.ok:
+                 error_msg = f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 raise exceptions.FailedToGenerateResponseError(error_msg)
+
+             streaming_response = ""
+             for line in response.iter_lines(decode_unicode=True):
+                 if line:
+                     match = re.search(r'0:"(.*?)"', line)
+                     if match:
+                         content = match.group(1)
+                         streaming_response += content
+                         yield content if raw else dict(text=content)
+             self.last_response.update(dict(text=streaming_response))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> str:
+         """Generate response `str`"""
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """Retrieves message only from response"""
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+ if __name__ == "__main__":
+     print("-" * 80)
+     print(f"{'Model':<50} {'Status':<10} {'Response'}")
+     print("-" * 80)
+
+     # Test all available models
+     working = 0
+     total = len(VercelAI.AVAILABLE_MODELS)
+
+     for model in VercelAI.AVAILABLE_MODELS:
+         try:
+             test_ai = VercelAI(model=model, timeout=60)
+             response = test_ai.chat("Say 'Hello' in one word", stream=True)
+             response_text = ""
+             for chunk in response:
+                 response_text += chunk
+                 print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
+
+             if response_text and len(response_text.strip()) > 0:
+                 status = "✓"
+                 # Truncate response if too long
+                 display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+             else:
+                 status = "✗"
+                 display_text = "Empty or invalid response"
+             print(f"\r{model:<50} {status:<10} {display_text}")
+         except Exception as e:
+             print(f"\r{model:<50} {'✗':<10} {str(e)}")
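The file above is the new webscout/Provider/VercelAI.py added in 7.9. A minimal usage sketch based only on the code shown in this diff (the model ids and streaming behaviour are taken from that code; actual availability of the chat.vercel.ai endpoint is not verified here):

```python
from webscout.Provider import VercelAI

# "chat-model" is one of the two ids listed in AVAILABLE_MODELS above.
ai = VercelAI(model="chat-model", timeout=60)

# Non-streaming: chat() returns the whole reply as a string.
print(ai.chat("Say hello in one word"))

# Streaming: chat(stream=True) yields decoded text chunks as they arrive.
for chunk in ai.chat("Tell me a short joke", stream=True):
    print(chunk, end="", flush=True)
```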
@@ -9,6 +9,7 @@ from webscout.AIutel import Conversation
  from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions
+ from webscout.litagent import LitAgent

  class WebSim(Provider):
      """
@@ -58,13 +59,13 @@ class WebSim(Provider):
          """Initializes the WebSim API client."""
          if model not in self.AVAILABLE_MODELS:
              raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
+         self.agent = LitAgent()
          self.headers = {
              'accept': '*/*',
              'accept-language': 'en-US,en;q=0.9',
              'content-type': 'text/plain;charset=UTF-8',
              'origin': 'https://websim.ai',
-             'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
+             'user-agent': self.agent.random(),
              'websim-flags;': ''
          }

@@ -1,13 +1,14 @@
+ import re
  import requests
  import json
- from typing import Any, Dict, Generator, Optional
+ from typing import Union, Any, Dict, Generator, Optional

  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
  from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions
- from webscout import LitAgent
+ from webscout.litagent import LitAgent


  class WiseCat(Provider):
@@ -49,11 +50,8 @@ class WiseCat(Provider):
          self.last_response = {}
          self.model = model
          self.system_prompt = system_prompt
-         self.headers = {
-             "Content-Type": "application/json",
-             "Accept": "*/*",
-             "User-Agent": LitAgent().random()
-         }
+         self.litagent = LitAgent()
+         self.headers = self.litagent.generate_fingerprint()
          self.session.headers.update(self.headers)
          self.session.proxies = proxies

@@ -81,7 +79,7 @@ class WiseCat(Provider):
          raw: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+     ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
          """Chat with AI"""
          conversation_prompt = self.conversation.gen_complete_prompt(prompt)
          if optimizer:
@@ -120,11 +118,11 @@ class WiseCat(Provider):
              streaming_response = ""
              for line in response.iter_lines(decode_unicode=True):
                  if line:
-                     if line.startswith("0:"):
-                         content = line[2:].strip('"')
+                     match = re.search(r'0:"(.*?)"', line)
+                     if match:
+                         content = match.group(1)
                          streaming_response += content
                          yield content if raw else dict(text=content)
-
              self.last_response.update(dict(text=streaming_response))
              self.conversation.update_chat_history(
                  prompt, self.get_message(self.last_response)
@@ -166,7 +164,7 @@ class WiseCat(Provider):
      def get_message(self, response: dict) -> str:
          """Retrieves message only from response"""
          assert isinstance(response, dict), "Response should be of dict data-type only"
-         return response["text"]
+         return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')

  if __name__ == "__main__":
      print("-" * 80)
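Both WiseCat (above) and the new VercelAI provider now extract streamed text with the regex `0:"(.*?)"` instead of slicing lines that start with `0:`. A small self-contained sketch of what that parsing does; the sample lines are illustrative, not captured traffic:

```python
import re

# Illustrative lines in the Vercel-style data stream these providers parse;
# real responses may interleave other record types (e.g. e:{...} finish events).
sample_lines = ['0:"Hello"', '0:", world"', 'e:{"finishReason":"stop"}']

chunks = []
for line in sample_lines:
    match = re.search(r'0:"(.*?)"', line)  # same pattern used in the diff
    if match:
        chunks.append(match.group(1))

print("".join(chunks))  # -> Hello, world
```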
@@ -8,7 +8,7 @@ from webscout.AIutel import Conversation
  from webscout.AIutel import AwesomePrompts, sanitize_stream
  from webscout.AIbase import Provider, AsyncProvider
  from webscout import exceptions
- from typing import Any, AsyncGenerator, Dict
+ from typing import Union, Any, AsyncGenerator, Dict

  import cloudscraper

@@ -20,7 +20,6 @@ from .OLLAMA import OLLAMA
  from .Andi import AndiSearch
  from .PizzaGPT import *
  from .Llama3 import *
- from .DARKAI import *
  from .koala import *
  from .meta import *
  from .julius import *
@@ -29,7 +28,6 @@ from .yep import *
  from .Cloudflare import *
  from .turboseek import *
  from .Free2GPT import *
- from .EDITEE import *
  from .TeachAnything import *
  from .AI21 import *
  from .Chatify import *
@@ -84,6 +82,11 @@ from .labyrinth import *
  from .WebSim import *
  from .LambdaChat import *
  from .ChatGPTClone import *
+ from .VercelAI import *
+ from .ExaChat import *
+ from .asksteve import *
+ from .Aitopia import *
+ from .searchchat import *
  __all__ = [
      'LLAMA',
      'LabyrinthAI',
@@ -123,7 +126,6 @@ __all__ = [
      'AndiSearch',
      'PIZZAGPT',
      'Sambanova',
-     'DARKAI',
      'KOALA',
      'Meta',
      'AskMyAI',
@@ -133,7 +135,6 @@ __all__ = [
      'YEPCHAT',
      'Cloudflare',
      'TurboSeek',
-     'Editee',
      'TeachAnything',
      'AI21',
      'Chatify',
@@ -171,4 +172,9 @@ __all__ = [
      'GithubChat',
      'UncovrAI',
      'WebSim',
+     'VercelAI',
+     'ExaChat',
+     'AskSteve',
+     'Aitopia',
+     'SearchChatAI',
  ]
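With the export changes above, the providers added in 7.9 should be importable directly from webscout.Provider. A quick sketch (the names match the new __all__ entries; the runtime behaviour of each provider is not verified here):

```python
from webscout.Provider import VercelAI, ExaChat, AskSteve, Aitopia, SearchChatAI

# DARKAI (renamed to AskSteve) and Editee are no longer exported in 7.9.
for provider_cls in (VercelAI, ExaChat, AskSteve, Aitopia, SearchChatAI):
    print(provider_cls.__name__)
```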
@@ -2,7 +2,7 @@ import requests
  import json
  import html
  import re
- from typing import Any, Dict
+ from typing import Union, Any, Dict

  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
@@ -8,7 +8,7 @@ from webscout.AIutel import Conversation
  from webscout.AIutel import AwesomePrompts, sanitize_stream
  from webscout.AIbase import Provider, AsyncProvider
  from webscout import exceptions
-
+ from webscout.litagent import LitAgent

  class AIMathGPT(Provider):
      """
@@ -58,11 +58,7 @@ class AIMathGPT(Provider):
              "sec-fetch-dest": "empty",
              "sec-fetch-mode": "cors",
              "sec-fetch-site": "same-origin",
-             "user-agent": (
-                 "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
-                 "AppleWebKit/537.36 (KHTML, like Gecko) "
-                 "Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0"
-             ),
+             "user-agent": LitAgent().random(),
          }
          self.session = requests.Session()
          self.session.headers.update(self.headers)
@@ -1,4 +1,4 @@
- from typing import Any, Dict, Generator
+ from typing import Union, Any, Dict, Generator
  from uuid import uuid4
  import requests
  import re
@@ -1,14 +1,14 @@
  import requests
  import json
  import re
- from typing import Any, Dict, Optional, Generator
+ from typing import Union, Any, Dict, Optional, Generator

  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
  from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions
- from webscout import LitAgent as UserAgent
+ from webscout.litagent import LitAgent as UserAgent

  class AskMyAI(Provider):
      """
@@ -68,7 +68,7 @@ class AskMyAI(Provider):
          raw: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+     ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
          """Sends a prompt to the askmyai.chat API and returns the response."""
          conversation_prompt = self.conversation.gen_complete_prompt(prompt)
          if optimizer:
@@ -125,7 +125,7 @@ class AskMyAI(Provider):
          stream: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> str | Generator[str, None, None]:
+     ) -> Union[str, Generator[str, None, None]]:
          """Generates a response from the AskMyAI API."""

          def for_stream():
@@ -1,24 +1,15 @@
- import json
- from typing import Any, Dict, Optional
+ import requests
  from webscout.AIutel import Optimizers
  from webscout.AIutel import Conversation
- from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider
- from webscout import exceptions, LitAgent
- import requests
+ from webscout.litagent import LitAgent

- class DARKAI(Provider):
+ class AskSteve(Provider):
      """
-     A class to interact with the DarkAI API.
+     A class to interact with the AskSteve API.
      """
-
-     AVAILABLE_MODELS = [
-         "llama-3-70b", # Uncensored
-         "llama-3-405b",
-         "gpt-3.5-turbo",
-         "gpt-4o"
-     ]
-
+     AVAILABLE_MODELS = ["Gemini"]
      def __init__(
          self,
          is_conversation: bool = True,
@@ -30,51 +21,38 @@ class DARKAI(Provider):
          proxies: dict = {},
          history_offset: int = 10250,
          act: str = None,
-         model: str = "gpt-4o",
      ) -> None:
-         """
-         Initializes the DARKAI API with given parameters.
+         """Instantiates AskSteve

          Args:
              is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-             max_tokens (int, optional): Maximum number of tokens to be generated upon completion.
-                 Defaults to 600.
+             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
              timeout (int, optional): Http request timeout. Defaults to 30.
              intro (str, optional): Conversation introductory prompt. Defaults to None.
              filepath (str, optional): Path to file containing conversation history. Defaults to None.
              update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
              proxies (dict, optional): Http request proxies. Defaults to {}.
-             history_offset (int, optional): Limit conversation history to this number of last texts.
-                 Defaults to 10250.
+             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
              act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-             model (str, optional): AI model to use. Defaults to "gpt-4o".
-                 Options: "llama-3-70b" (uncensored), "llama-3-405b", "gpt-3.5-turbo", "gpt-4o"
+             system_prompt (str, optional): System prompt for AskSteve. Defaults to the provided string.
          """
-         if model not in self.AVAILABLE_MODELS:
-             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
          self.session = requests.Session()
          self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-         self.api_endpoint = "https://darkai.foundation/chat"
-         self.stream_chunk_size = 64
+         self.api_endpoint = "https://quickstart.asksteve.to/quickStartRequest"
          self.timeout = timeout
          self.last_response = {}
-         self.model = model
          self.headers = {
-             "accept": "text/event-stream",
+             "accept": "*/*",
              "accept-encoding": "gzip, deflate, br, zstd",
-             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-             "content-type": "application/json",
-             "dnt": "1",
-             "origin": "https://www.aiuncensored.info",
-             "referer": "https://www.aiuncensored.info/",
-             "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-             "sec-ch-ua-mobile": "?0",
-             "sec-ch-ua-platform": '"Windows"',
+             "accept-language": "en-US,en;q=0.9",
+             "content-type": "text/plain;charset=UTF-8",
+             "origin": "chrome-extension://gldebcpkoojijledacjeboaehblhfbjg",
+             "priority": "u=1, i",
              "sec-fetch-dest": "empty",
              "sec-fetch-mode": "cors",
-             "sec-fetch-site": "cross-site",
+             "sec-fetch-site": "none",
+             "sec-fetch-storage-access": "active",
              "user-agent": LitAgent().random(),
          }

@@ -104,19 +82,22 @@ class DARKAI(Provider):
          raw: bool = False,
          optimizer: str = None,
          conversationally: bool = False,
-     ) -> Dict[str, Any]:
-         """
-         Sends a prompt to the DarkAI API and returns the response.
+     ) -> dict:
+         """Chat with AI

          Args:
-             prompt: The text prompt to generate text from.
-             stream (bool, optional): Whether to stream the response. Defaults to False.
-             raw (bool, optional): Whether to return the raw response. Defaults to False.
-             optimizer (str, optional): The name of the optimizer to use. Defaults to None.
-             conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
-
+             prompt (str): Prompt to be send.
+             stream (bool, optional): Flag for streaming response. Defaults to False.
+             raw (bool, optional): Stream back raw response as received. Defaults to False.
+             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
          Returns:
-             The response from the API.
+             dict : {}
+         ```json
+         {
+             "text" : "How may I assist you today?"
+         }
+         ```
          """
          conversation_prompt = self.conversation.gen_complete_prompt(prompt)
          if optimizer:
@@ -128,41 +109,37 @@ class DARKAI(Provider):
                  raise Exception(
                      f"Optimizer is not one of {self.__available_optimizers}"
                  )
-
+
          payload = {
-             "query": conversation_prompt,
-             "model": self.model
+             "key": "asksteve",
+             "prompt": conversation_prompt
          }

          def for_stream():
              response = self.session.post(
-                 self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
+                 self.api_endpoint,
+                 headers=self.headers,
+                 json=payload,
+                 stream=True,
+                 timeout=self.timeout,
              )
-
              if not response.ok:
-                 raise exceptions.FailedToGenerateResponseError(
-                     f"Failed to generate response - ({response.status_code}, {response.reason})"
+                 raise Exception(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                  )

-             streaming_response = ""
-             for line in response.iter_lines():
-                 if line:
-                     decoded_line = line.decode('utf-8')
-                     if decoded_line.startswith("data:"):
-                         data = decoded_line[len("data:"):].strip()
-                         if data:
-                             try:
-                                 event = json.loads(data)
-                                 if event.get("event") == "final-response":
-                                     message = event['data'].get('message', '')
-                                     streaming_response += message
-                                     yield message if raw else dict(text=message)
-                             except json.decoder.JSONDecodeError:
-                                 continue
-             self.last_response.update(dict(text=streaming_response))
+             response_data = response.json()
+             if "candidates" in response_data and len(response_data["candidates"]) > 0:
+                 text = response_data["candidates"][0]["content"]["parts"][0]["text"]
+                 self.last_response.update(dict(text=text))
+                 yield dict(text=text) if not raw else text
+             else:
+                 raise Exception("No response generated")
+
              self.conversation.update_chat_history(
                  prompt, self.get_message(self.last_response)
              )
+
          def for_non_stream():
              for _ in for_stream():
                  pass
@@ -216,9 +193,11 @@ class DARKAI(Provider):
          """
          assert isinstance(response, dict), "Response should be of dict data-type only"
          return response["text"]
- if __name__ == '__main__':
+
+
+ if __name__ == "__main__":
      from rich import print
-     ai = DARKAI()
-     response = ai.chat("tell me about india")
+     ai = AskSteve()
+     response = ai.chat("hi", stream=True)
      for chunk in response:
-         print(chunk, end="", flush=True)
+         print(chunk, end="", flush=True)