webscout-5.2-py3-none-any.whl → webscout-5.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (58)
  1. webscout/AIauto.py +8 -12
  2. webscout/AIutel.py +10 -10
  3. webscout/Agents/Onlinesearcher.py +5 -5
  4. webscout/Agents/functioncall.py +123 -97
  5. webscout/DWEBS.py +99 -77
  6. webscout/Local/_version.py +2 -2
  7. webscout/Provider/Andi.py +1 -21
  8. webscout/Provider/BasedGPT.py +1 -21
  9. webscout/Provider/Blackboxai.py +1 -21
  10. webscout/Provider/Chatify.py +175 -0
  11. webscout/Provider/Cloudflare.py +1 -22
  12. webscout/Provider/Cohere.py +2 -23
  13. webscout/Provider/DARKAI.py +0 -1
  14. webscout/Provider/Deepinfra.py +2 -16
  15. webscout/Provider/EDITEE.py +3 -26
  16. webscout/Provider/Gemini.py +1 -24
  17. webscout/Provider/Groq.py +0 -2
  18. webscout/Provider/Koboldai.py +0 -21
  19. webscout/Provider/Llama.py +4 -21
  20. webscout/Provider/NetFly.py +21 -61
  21. webscout/Provider/OLLAMA.py +0 -17
  22. webscout/Provider/Openai.py +2 -22
  23. webscout/Provider/Perplexity.py +1 -2
  24. webscout/Provider/Phind.py +3 -508
  25. webscout/Provider/RUBIKSAI.py +11 -5
  26. webscout/Provider/Reka.py +4 -21
  27. webscout/Provider/TTS/streamElements.py +1 -22
  28. webscout/Provider/TTS/voicepod.py +11 -8
  29. webscout/Provider/ThinkAnyAI.py +17 -78
  30. webscout/Provider/Youchat.py +3 -20
  31. webscout/Provider/__init__.py +17 -8
  32. webscout/Provider/ai4chat.py +14 -8
  33. webscout/Provider/cerebras.py +199 -0
  34. webscout/Provider/{Berlin4h.py → cleeai.py} +68 -73
  35. webscout/Provider/{liaobots.py → elmo.py} +75 -106
  36. webscout/Provider/felo_search.py +29 -87
  37. webscout/Provider/geminiapi.py +198 -0
  38. webscout/Provider/genspark.py +222 -0
  39. webscout/Provider/julius.py +3 -20
  40. webscout/Provider/koala.py +1 -1
  41. webscout/Provider/lepton.py +194 -0
  42. webscout/Provider/turboseek.py +4 -21
  43. webscout/Provider/x0gpt.py +182 -0
  44. webscout/Provider/xdash.py +2 -22
  45. webscout/Provider/yep.py +391 -149
  46. webscout/YTdownloader.py +2 -3
  47. webscout/__init__.py +2 -2
  48. webscout/exceptions.py +2 -1
  49. webscout/transcriber.py +195 -140
  50. webscout/version.py +1 -1
  51. {webscout-5.2.dist-info → webscout-5.4.dist-info}/METADATA +47 -134
  52. webscout-5.4.dist-info/RECORD +98 -0
  53. webscout/voice.py +0 -34
  54. webscout-5.2.dist-info/RECORD +0 -93
  55. {webscout-5.2.dist-info → webscout-5.4.dist-info}/LICENSE.md +0 -0
  56. {webscout-5.2.dist-info → webscout-5.4.dist-info}/WHEEL +0 -0
  57. {webscout-5.2.dist-info → webscout-5.4.dist-info}/entry_points.txt +0 -0
  58. {webscout-5.2.dist-info → webscout-5.4.dist-info}/top_level.txt +0 -0
webscout/Provider/{Berlin4h.py → cleeai.py}
@@ -1,28 +1,21 @@
 import requests
 import json
-import uuid
-from typing import Any, Dict, Optional
+from uuid import uuid4
+
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider, AsyncProvider
-from webscout import exceptions
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
 
-class Berlin4h(Provider):
+class Cleeai(Provider):
     """
-    A class to interact with the Berlin4h AI API.
+    A class to interact with the Cleeai.com API.
     """
 
     def __init__(
         self,
-        api_token: str = "3bf369cd84339603f8a5361e964f9ebe",
-        api_endpoint: str = "https://ai.berlin4h.top/api/chat/completions",
-        model: str = "gpt-3.5-turbo",
-        temperature: float = 0.9,
-        presence_penalty: float = 0,
-        frequency_penalty: float = 0,
-        max_tokens: int = 4000,
         is_conversation: bool = True,
+        max_tokens: int = 600,
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -31,18 +24,11 @@ class Berlin4h(Provider):
         history_offset: int = 10250,
         act: str = None,
     ) -> None:
-        """
-        Initializes the Berlin4h API with given parameters.
+        """Instantiates Cleeai
 
         Args:
-            api_token (str): The API token for authentication.
-            api_endpoint (str): The API endpoint to use for requests.
-            model (str): The AI model to use for text generation.
-            temperature (float): The temperature parameter for the model.
-            presence_penalty (float): The presence penalty parameter for the model.
-            frequency_penalty (float): The frequency penalty parameter for the model.
-            max_tokens (int): The maximum number of tokens to generate.
             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
             timeout (int, optional): Http request timeout. Defaults to 30.
             intro (str, optional): Conversation introductory prompt. Defaults to None.
             filepath (str, optional): Path to file containing conversation history. Defaults to None.
@@ -51,21 +37,31 @@ class Berlin4h(Provider):
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
         """
-        self.api_token = api_token
-        self.api_endpoint = api_endpoint
-        self.model = model
-        self.temperature = temperature
-        self.presence_penalty = presence_penalty
-        self.frequency_penalty = frequency_penalty
-        self.max_tokens = max_tokens
-        self.parent_message_id: Optional[str] = None
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.stream_chunk_size = 1
+        self.api_endpoint = "https://qna-api.cleeai.com/open_research"
+        self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
-        self.headers = {"Content-Type": "application/json", "Token": self.api_token}
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://www.cleeai.com",
+            "priority": "u=1, i",
+            "referer": "https://www.cleeai.com/",
+            "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-site",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
+        }
+
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
@@ -92,19 +88,22 @@ class Berlin4h(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]:
-        """
-        Sends a prompt to the Berlin4h AI API and returns the response.
+    ) -> dict:
+        """Chat with AI
 
         Args:
-            prompt: The text prompt to generate text from.
-            stream (bool, optional): Whether to stream the response. Defaults to False.
-            raw (bool, optional): Whether to return the raw response. Defaults to False.
-            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
-            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
-
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
         Returns:
-            The response from the API.
+            dict : {}
+        ```json
+        {
+            "text" : "How may I assist you today?"
+        }
+        ```
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -117,41 +116,35 @@ class Berlin4h(Provider):
                 f"Optimizer is not one of {self.__available_optimizers}"
             )
 
-        payload: Dict[str, any] = {
-            "prompt": conversation_prompt,
-            "parentMessageId": self.parent_message_id or str(uuid.uuid4()),
-            "options": {
-                "model": self.model,
-                "temperature": self.temperature,
-                "presence_penalty": self.presence_penalty,
-                "frequency_penalty": self.frequency_penalty,
-                "max_tokens": self.max_tokens,
-            },
+        payload = {
+            "data": {
+                "question": conversation_prompt,
+                "question_id": 69237,
+                "query_id": uuid4().hex,
+                "source_list": [],
+                "followup_qas": [],
+                "with_upload": True,
+            }
         }
 
         def for_stream():
             response = self.session.post(
-                self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
+                self.api_endpoint,
+                headers=self.headers,
+                json=payload,
+                stream=True,
+                timeout=self.timeout,
             )
-
             if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason})"
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
+            full_response = ''
+            for chunk in response.iter_content(chunk_size=self.stream_chunk_size):
+                full_response += chunk.decode('utf-8')
+                yield chunk.decode('utf-8') if raw else dict(text=full_response)
 
-            streaming_response = ""
-            # Collect the entire line before processing
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    try:
-                        json_data = json.loads(line)
-                        content = json_data['content']
-                        if ">" in content: break
-                        streaming_response += content
-                        yield content if raw else dict(text=streaming_response) # Yield accumulated response
-                    except:
-                        continue
-            self.last_response.update(dict(text=streaming_response))
+            self.last_response.update(dict(text=full_response))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
@@ -209,9 +202,11 @@ class Berlin4h(Provider):
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
-if __name__ == '__main__':
+
+
+if __name__ == "__main__":
     from rich import print
-    ai = Berlin4h()
-    response = ai.chat("tell me about india")
+    ai = Cleeai()
+    response = ai.chat(input(">>> "))
     for chunk in response:
         print(chunk, end="", flush=True)
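The rewritten `for_stream` above reduces to one pattern: POST with `stream=True`, read fixed-size chunks, accumulate the decoded text, and yield the running total after every chunk. A minimal, self-contained sketch of that pattern follows; the URL and payload here are placeholders, not the real Cleeai endpoint:

```python
import requests

def stream_text(url: str, payload: dict, chunk_size: int = 64):
    """Yield the cumulative response text after each received chunk."""
    response = requests.post(url, json=payload, stream=True, timeout=30)
    if not response.ok:
        raise Exception(
            f"Failed to generate response - ({response.status_code}, {response.reason})"
        )
    full_response = ""
    for chunk in response.iter_content(chunk_size=chunk_size):
        # Caveat shared with the shipped code: decoding each chunk separately
        # can split a multi-byte UTF-8 sequence across chunk boundaries.
        full_response += chunk.decode("utf-8")
        yield full_response

# Placeholder endpoint, for illustration only:
# for text in stream_text("https://example.com/stream", {"q": "hi"}):
#     print(text)
```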
webscout/Provider/{liaobots.py → elmo.py}
@@ -1,40 +1,20 @@
-import json
-import re
-import uuid
-import gzip
-import zlib
-from typing import Any, Dict, Generator, Union
-
 import requests
+import json
+import textwrap
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
-from webscout import exceptions
 
-class LiaoBots(Provider):
+
+class Elmo(Provider):
     """
-    A class to interact with the LiaoBots API.
+    A class to interact with the Elmo.chat API.
     """
 
-    # List of available models
-    AVAILABLE_MODELS = [
-        "gpt-4o-mini",
-        "gpt-4o-free",
-        "gpt-4o-mini-free",
-        "gpt-4-turbo-2024-04-09",
-        "gpt-4o",
-        "gpt-4-0613",
-        "claude-3-5-sonnet-20240620",
-        "gemini-1.5-pro-latest",
-        "gemini-1.5-flash-latest"
-    ]
-
     def __init__(
         self,
-        auth_code: str = "G3USRn7M5zsXn",
-        cookie: str = "gkp2=pevIjZCYj8wMcrWPEAq6",
         is_conversation: bool = True,
         max_tokens: int = 600,
         timeout: int = 30,
@@ -44,15 +24,12 @@ class LiaoBots(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "claude-3-5-sonnet-20240620",
-        system_prompt: str = "You are a helpful assistant."
+        system_prompt: str = "You are a helpful AI assistant. Provide clear, concise, and well-structured information. Organize your responses into paragraphs for better readability.",
+
     ) -> None:
-        """
-        Initializes the LiaoBots API with given parameters.
+        """Instantiates Elmo
 
         Args:
-            auth_code (str): The auth code for authentication.
-            cookie (str): The cookie for authentication.
             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
             max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
             timeout (int, optional): Http request timeout. Defaults to 30.
@@ -62,44 +39,35 @@ class LiaoBots(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            model (str, optional): AI model to use for text generation. Defaults to "claude-3-5-sonnet-20240620".
-            system_prompt (str, optional): System prompt for LiaoBots. Defaults to "You are a helpful assistant.".
+            system_prompt (str, optional): System prompt for Elmo. Defaults to the provided string.
+            web_search (bool, optional): Enables web search mode when True. Defaults to False.
         """
-
-        # Check if the chosen model is available
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.auth_code = auth_code
-        self.cookie = cookie
-        self.api_endpoint = "https://liaobots.work/api/chat"
-        self.model = model
-        self.system_prompt = system_prompt
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://www.elmo.chat/api/v1/prompt"
         self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
+        self.system_prompt = system_prompt
         self.headers = {
             "accept": "*/*",
             "accept-encoding": "gzip, deflate, br, zstd",
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "content-type": "application/json",
-            "cookie": self.cookie,
+            "content-length": "763",
+            "content-type": "text/plain;charset=UTF-8",
             "dnt": "1",
-            "origin": "https://liaobots.work",
+            "origin": "chrome-extension://ipnlcfhfdicbfbchfoihipknbaeenenm",
             "priority": "u=1, i",
-            "referer": "https://liaobots.work/en",
-            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
             "sec-ch-ua-mobile": "?0",
             "sec-ch-ua-platform": '"Windows"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
-            "x-Auth-Code": self.auth_code,
+            "sec-fetch-site": "cross-site",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
         }
+
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
@@ -126,19 +94,22 @@ class LiaoBots(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> Dict[str, Any]:
-        """
-        Sends a prompt to the LiaoBots API and returns the response.
+    ) -> dict:
+        """Chat with AI
 
         Args:
-            prompt: The text prompt to generate text from.
-            stream (bool, optional): Whether to stream the response. Defaults to False.
-            raw (bool, optional): Whether to return the raw response. Defaults to False.
-            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
-            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
-
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
         Returns:
-            The response from the API.
+            dict : {}
+        ```json
+        {
+            "text" : "How may I assist you today?"
+        }
+        ```
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -151,61 +122,58 @@ class LiaoBots(Provider):
                 f"Optimizer is not one of {self.__available_optimizers}"
             )
 
-        payload: Dict[str, any] = {
-            "conversationId": str(uuid.uuid4()),
-            "model": {
-                "id": self.model
+
+        payload = {
+            "metadata": {
+                "system": {"language": "en-US"},
+                "website": {
+                    "url": "chrome-extension://ipnlcfhfdicbfbchfoihipknbaeenenm/options.html",
+                    "origin": "chrome-extension://ipnlcfhfdicbfbchfoihipknbaeenenm",
+                    "title": "Elmo Chat - Your AI Web Copilot",
+                    "xpathIndexLength": 0,
+                    "favicons": [],
+                    "language": "en",
+                    "content": "",
+                    "type": "html",
+                    "selection": "",
+                    "hash": "d41d8cd98f00b204e9800998ecf8427e",
+                },
             },
-            "messages": [
-                {
-                    "role": "user",
-                    "content": conversation_prompt
-                }
+            "regenerate": True,
+            "conversation": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt},
             ],
-            "key": "",
-            "prompt": self.system_prompt
+            "enableCache": False,
         }
 
         def for_stream():
             response = self.session.post(
-                self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
+                self.api_endpoint,
+                headers=self.headers,
+                json=payload,
+                stream=True,
+                timeout=self.timeout,
             )
-
             if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason})"
+                raise Exception(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
-
-            streaming_response = ""
-            content_encoding = response.headers.get('Content-Encoding')
-            # Stream the response
-            for chunk in response.iter_content():
-                if chunk:
-                    try:
-                        # Decompress the chunk if necessary
-                        if content_encoding == 'gzip':
-                            chunk = gzip.decompress(chunk)
-                        elif content_encoding == 'deflate':
-                            chunk = zlib.decompress(chunk)
-
-                        # Decode the chunk
-                        decoded_chunk = chunk.decode('utf-8')
-                        streaming_response += decoded_chunk
-                    except UnicodeDecodeError:
-                        # Handle non-textual data
-                        pass
-                else:
-                    pass
-            self.last_response.update(dict(text=streaming_response))
+            full_response = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    if line.startswith('0:'):
+                        chunk = line.split(':"')[1].strip('"')
+                        formatted_output = (
+                            chunk.replace("\\n", "\n").replace("\\n\\n", "\n\n")
+                        )
+                        full_response += formatted_output
+                        self.last_response.update(dict(text=full_response))
+                        yield formatted_output if raw else dict(text=full_response)
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
 
-            if stream:
-                yield from [] # Yield nothing when streaming, focus on side effects
-            else:
-                return [] # Return empty list for non-streaming case
-
         def for_non_stream():
             for _ in for_stream():
                 pass
@@ -260,9 +228,10 @@ class LiaoBots(Provider):
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     from rich import print
-    liaobots = LiaoBots()
-    response = liaobots.chat("tell me about india")
+    ai = Elmo()
+    response = ai.chat(input(">>> "))
     for chunk in response:
         print(chunk, end="", flush=True)
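The new Elmo `for_stream` parses a line-prefixed stream: lines beginning with `0:` carry a quoted text fragment with escaped newlines, and other frames (metadata, finish markers) are skipped. A standalone sketch of that parsing step, run here against made-up sample lines rather than a live elmo.chat response:

```python
def parse_elmo_lines(lines):
    """Accumulate the text fragments from lines shaped like 0:"..."."""
    full_response = ""
    for line in lines:
        if line.startswith('0:'):
            # Fragile if a fragment itself contains ':"' - same caveat as the shipped code.
            chunk = line.split(':"')[1].strip('"')
            full_response += chunk.replace("\\n", "\n")
    return full_response

# Made-up sample frames, for illustration only:
sample = ['0:"Hello, "', '0:"world!"', 'e:{"finishReason":"stop"}']
print(parse_elmo_lines(sample))  # -> Hello, world!
```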
webscout/Provider/felo_search.py
@@ -1,32 +1,14 @@
-import time
-import uuid
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-import click
+import re
 import requests
-from requests import get
 from uuid import uuid4
-from re import findall
-from requests.exceptions import RequestException
-from curl_cffi.requests import get, RequestsError
-import g4f
-from random import randint
-from PIL import Image
-import io
-import re
 import json
-import yaml
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
-import logging
-import httpx
+
 
 class Felo(Provider):
     def __init__(
@@ -41,19 +23,6 @@ class Felo(Provider):
         history_offset: int = 10250,
         act: str = None,
     ):
-        """Instantiates Felo
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-        """
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -106,22 +75,6 @@ class Felo(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "How may I assist you today?"
-        }
-        ```
-        """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
@@ -156,28 +109,31 @@ class Felo(Provider):
             )
 
             streaming_text = ""
-            for value in response.iter_lines(
-                decode_unicode=True,
-                chunk_size=self.stream_chunk_size,
-                delimiter="\n",
-            ):
-                try:
-                    if bool(value) and value.startswith('data:'):
-                        data = json.loads(value[len('data:'):].strip())
-                        if data['type'] == 'a':
-                            streaming_text += data['data']['k']
-                            resp = dict(text=streaming_text)
-                            self.last_response.update(resp)
-                            yield value if raw else resp
-                except json.decoder.JSONDecodeError:
-                    pass
+            for line in response.iter_lines(decode_unicode=True):
+                if line.startswith('data:'):
+                    try:
+                        data = json.loads(line[5:].strip())
+                        if data['type'] == 'answer' and 'text' in data['data']:
+                            new_text = data['data']['text']
+                            if len(new_text) > len(streaming_text):
+                                delta = new_text[len(streaming_text):]
+                                streaming_text = new_text
+                                resp = dict(text=delta)
+                                self.last_response.update(dict(text=streaming_text))
+                                yield line if raw else resp
+                    except json.JSONDecodeError:
+                        pass
+
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
 
         def for_non_stream():
-            for _ in for_stream():
-                pass
+            full_response = ""
+            for chunk in for_stream():
+                if not raw:
+                    full_response += chunk['text']
+            self.last_response = dict(text=full_response)
             return self.last_response
 
         return for_stream() if stream else for_non_stream()
@@ -189,16 +145,6 @@ class Felo(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-
         def for_stream():
             for response in self.ask(
                 prompt, True, optimizer=optimizer, conversationally=conversationally
@@ -218,21 +164,17 @@ class Felo(Provider):
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
         assert isinstance(response, dict), "Response should be of dict data-type only"
 
-        text = re.sub(r'\[\[\d+\]\]', '', response["text"])
-        return text
+        if "text" in response:
+            text = re.sub(r'\[\[\d+\]\]', '', response["text"])
+            return text
+        else:
+            return ""
+
 if __name__ == '__main__':
     from rich import print
     ai = Felo()
     response = ai.chat(input(">>> "))
     for chunk in response:
-        print(chunk, end="", flush=True)
\ No newline at end of file
+        print(chunk, end="", flush=True)
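The reworked Felo parser assumes each `data:` event carries the cumulative answer so far, so it emits only the newly appended tail of each event. A minimal sketch of that delta logic over made-up SSE lines; the event shape follows the code above, not a documented Felo schema:

```python
import json

def sse_deltas(sse_lines):
    """Yield only the newly appended text from cumulative 'answer' events."""
    seen = ""
    for line in sse_lines:
        if not line.startswith("data:"):
            continue
        try:
            data = json.loads(line[5:].strip())
        except json.JSONDecodeError:
            continue  # skip non-JSON keep-alive or malformed frames
        if data.get("type") == "answer" and "text" in data.get("data", {}):
            new_text = data["data"]["text"]
            if len(new_text) > len(seen):
                yield new_text[len(seen):]  # the newly added tail only
                seen = new_text

# Made-up events, for illustration only:
events = [
    'data: {"type": "answer", "data": {"text": "Hel"}}',
    'data: {"type": "answer", "data": {"text": "Hello there"}}',
]
print(list(sse_deltas(events)))  # -> ['Hel', 'lo there']
```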