webscout-4.7-py3-none-any.whl → webscout-4.8-py3-none-any.whl

This diff shows the changes between publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (43)
  1. webscout/Agents/functioncall.py +97 -37
  2. webscout/Bard.py +365 -0
  3. webscout/Local/_version.py +1 -1
  4. webscout/Provider/Andi.py +7 -1
  5. webscout/Provider/BasedGPT.py +11 -5
  6. webscout/Provider/Berlin4h.py +11 -5
  7. webscout/Provider/Blackboxai.py +10 -4
  8. webscout/Provider/Cohere.py +11 -5
  9. webscout/Provider/DARKAI.py +25 -7
  10. webscout/Provider/Deepinfra.py +2 -1
  11. webscout/Provider/Deepseek.py +25 -9
  12. webscout/Provider/DiscordRocks.py +389 -0
  13. webscout/Provider/{ChatGPTUK.py → Farfalle.py} +80 -67
  14. webscout/Provider/Gemini.py +1 -1
  15. webscout/Provider/Groq.py +244 -110
  16. webscout/Provider/Llama.py +13 -5
  17. webscout/Provider/Llama3.py +15 -2
  18. webscout/Provider/OLLAMA.py +8 -7
  19. webscout/Provider/Perplexity.py +422 -52
  20. webscout/Provider/Phind.py +6 -5
  21. webscout/Provider/PizzaGPT.py +7 -1
  22. webscout/Provider/__init__.py +12 -31
  23. webscout/Provider/ai4chat.py +193 -0
  24. webscout/Provider/koala.py +11 -5
  25. webscout/Provider/{VTLchat.py → liaobots.py} +120 -104
  26. webscout/Provider/meta.py +2 -1
  27. webscout/version.py +1 -1
  28. webscout/webai.py +2 -64
  29. webscout/webscout_search.py +1 -1
  30. {webscout-4.7.dist-info → webscout-4.8.dist-info}/METADATA +227 -252
  31. {webscout-4.7.dist-info → webscout-4.8.dist-info}/RECORD +35 -40
  32. webscout/Provider/FreeGemini.py +0 -169
  33. webscout/Provider/Geminiflash.py +0 -152
  34. webscout/Provider/Geminipro.py +0 -152
  35. webscout/Provider/Leo.py +0 -469
  36. webscout/Provider/OpenGPT.py +0 -867
  37. webscout/Provider/Xjai.py +0 -230
  38. webscout/Provider/Yepchat.py +0 -478
  39. webscout/Provider/Youchat.py +0 -225
  40. {webscout-4.7.dist-info → webscout-4.8.dist-info}/LICENSE.md +0 -0
  41. {webscout-4.7.dist-info → webscout-4.8.dist-info}/WHEEL +0 -0
  42. {webscout-4.7.dist-info → webscout-4.8.dist-info}/entry_points.txt +0 -0
  43. {webscout-4.7.dist-info → webscout-4.8.dist-info}/top_level.txt +0 -0
webscout/Provider/{ChatGPTUK.py → Farfalle.py}

@@ -1,29 +1,28 @@
 import requests
-from typing import Any, AsyncGenerator, Dict, Optional
 import json
-import re
+from typing import Any, Dict, Optional
 
-from ..AIutel import Optimizers
-from ..AIutel import Conversation
-from ..AIutel import AwesomePrompts, sanitize_stream
-from ..AIbase import Provider, AsyncProvider
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 
 
-class ChatGPTUK(Provider):
+class Farfalle(Provider):
     """
-    A class to interact with the ChatGPT UK API.
+    A class to interact with the Farfalle.dev API.
     """
 
+    available_models = [
+        "gpt-3.5-turbo",
+
+    ]
+
     def __init__(
         self,
         is_conversation: bool = True,
         max_tokens: int = 600,
-        temperature: float = 0.9,
-        presence_penalty: float = 0,
-        frequency_penalty: float = 0,
-        top_p: float = 1,
-        model: str = "google-gemini-pro",
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -31,39 +30,54 @@ class ChatGPTUK(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
+        model: str = "gpt-3.5-turbo",
     ) -> None:
         """
-        Initializes the ChatGPTUK API with given parameters.
+        Initializes the Farfalle.dev API with given parameters.
 
         Args:
             is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            temperature (float, optional): Charge of the generated text's randomness. Defaults to 0.9.
-            presence_penalty (float, optional): Chances of topic being repeated. Defaults to 0.
-            frequency_penalty (float, optional): Chances of word being repeated. Defaults to 0.
-            top_p (float, optional): Sampling threshold during inference time. Defaults to 1.
-            model (str, optional): LLM model name. Defaults to "google-gemini-pro".
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion.
+                Defaults to 600.
             timeout (int, optional): Http request timeout. Defaults to 30.
             intro (str, optional): Conversation introductory prompt. Defaults to None.
             filepath (str, optional): Path to file containing conversation history. Defaults to None.
             update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
             proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            history_offset (int, optional): Limit conversation history to this number of last texts.
+                Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): AI model to use. Defaults to "gpt-3.5-turbo".
+                Options: "gpt-3.5-turbo", "gpt-4"
         """
+        if model not in self.available_models:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.available_models}")
+
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://free.chatgpt.org.uk/api/openai/v1/chat/completions"
+        self.api_endpoint = "https://api.farfalle.dev/chat"
        self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
         self.model = model
-        self.temperature = temperature
-        self.presence_penalty = presence_penalty
-        self.frequency_penalty = frequency_penalty
-        self.top_p = top_p
-        self.headers = {"Content-Type": "application/json"}
+        self.headers = {
+            "accept": "text/event-stream",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://www.farfalle.dev",
+            "priority": "u=1, i",
+            "referer": "https://www.farfalle.dev/",
+            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-site",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0"
+        }
 
         self.__available_optimizers = (
             method
@@ -91,22 +105,19 @@
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI
+    ) -> Dict[str, Any]:
+        """
+        Sends a prompt to the Farfalle.dev API and returns the response.
 
         Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+            prompt: The text prompt to generate text from.
+            stream (bool, optional): Whether to stream the response. Defaults to False.
+            raw (bool, optional): Whether to return the raw response. Defaults to False.
+            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
+            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
+
         Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "How may I assist you today?"
-        }
-        ```
+            The response from the API.
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -118,47 +129,41 @@
                 raise Exception(
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
-
-        self.session.headers.update(self.headers)
+
         payload = {
-            "messages": [
-                {"role": "system", "content": "Keep your responses long and detailed"},
-                {"role": "user", "content": conversation_prompt}
-            ],
-            "stream": True,
-            "model": self.model,
-            "temperature": self.temperature,
-            "presence_penalty": self.presence_penalty,
-            "frequency_penalty": self.frequency_penalty,
-            "top_p": self.top_p,
-            "max_tokens": self.max_tokens_to_sample
+            "query": conversation_prompt,
+            "model": self.model
         }
 
         def for_stream():
             response = self.session.post(
-                self.api_endpoint, json=payload, stream=True, timeout=self.timeout
+                self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
             )
+
             if not response.ok:
                 raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                    f"Failed to generate response - ({response.status_code}, {response.reason})"
                 )
-
+
             streaming_response = ""
-            for line in response.iter_lines(decode_unicode=True, chunk_size=1):
+            for line in response.iter_lines():
                 if line:
-                    modified_line = re.sub("data:", "", line)
-                    try:
-                        json_data = json.loads(modified_line)
-                        content = json_data['choices'][0]['delta']['content']
-                        streaming_response += content
-                        yield content if raw else dict(text=streaming_response)
-                    except:
-                        continue
+                    decoded_line = line.decode('utf-8')
+                    if decoded_line.startswith("data:"):
+                        data = decoded_line[len("data:"):].strip()
+                        if data:
+                            try:
+                                event = json.loads(data)
+                                if event.get("event") == "final-response":
+                                    message = event['data'].get('message', '')
+                                    streaming_response += message
+                                    yield message if raw else dict(text=streaming_response)
+                            except json.decoder.JSONDecodeError:
+                                continue
             self.last_response.update(dict(text=streaming_response))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
-
        def for_non_stream():
             for _ in for_stream():
                 pass
@@ -211,4 +216,12 @@
             str: Message extracted
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
+        return response["text"]
+if __name__ == "__main__":
+    from rich import print
+
+    ai = Farfalle()
+    # Stream the response
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
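
Taken together, these hunks replace an OpenAI-style /chat/completions client with a Server-Sent Events (SSE) consumer: the provider now POSTs a small {"query", "model"} payload to https://api.farfalle.dev/chat and keeps only the "final-response" events from the stream. One caveat visible in the released code: the docstring advertises "gpt-4" as an option, but available_models contains only "gpt-3.5-turbo", so model="gpt-4" would trip the new ValueError check. Below is a minimal standalone sketch of the same wire protocol, reduced from the diff for illustration; the helper name farfalle_query is hypothetical, the header set is trimmed from the browser-style headers the released code sends, and the third-party endpoint's availability and exact event schema are not guaranteed.

# Hypothetical helper sketching the farfalle.dev SSE protocol shown above.
import json
import requests

def farfalle_query(query: str, model: str = "gpt-3.5-turbo", timeout: int = 30) -> str:
    # Trimmed header set; the released provider sends full browser-style headers.
    headers = {
        "accept": "text/event-stream",
        "content-type": "application/json",
        "origin": "https://www.farfalle.dev",
        "referer": "https://www.farfalle.dev/",
    }
    response = requests.post(
        "https://api.farfalle.dev/chat",        # endpoint from the diff
        json={"query": query, "model": model},  # payload shape from the diff
        headers=headers,
        stream=True,
        timeout=timeout,
    )
    response.raise_for_status()

    text = ""
    for line in response.iter_lines():  # SSE frames arrive line by line
        if not line:
            continue
        decoded = line.decode("utf-8")
        if not decoded.startswith("data:"):
            continue
        payload = decoded[len("data:"):].strip()
        if not payload:
            continue
        try:
            event = json.loads(payload)
        except json.JSONDecodeError:
            continue
        # Per the parser above, only "final-response" events carry answer text.
        if event.get("event") == "final-response":
            text += event.get("data", {}).get("message", "")
    return text

if __name__ == "__main__":
    print(farfalle_query("Hello"))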
webscout/Provider/Gemini.py

@@ -28,7 +28,7 @@ from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
 import logging
 import httpx
-from Bard import Chatbot
+from ..Bard import Chatbot
 import logging
 from os import path
 from json import load
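
This one-line change pairs with the new webscout/Bard.py (+365 lines) at the top of the files list: the Gemini provider now imports its Chatbot client from a module vendored into the wheel rather than from an externally installed top-level Bard module (presumably the GoogleBard distribution). A sketch of the practical effect for downstream code, assuming nothing beyond what the import above shows:

# webscout 4.7: Gemini support needed a separate package that installed
# a top-level `Bard` module.
# from Bard import Chatbot

# webscout 4.8: the client ships inside the wheel as webscout/Bard.py,
# so the same class resolves with no extra dependency.
from webscout.Bard import Chatbot  # same target as `from ..Bard import Chatbot` above

Vendoring removes a runtime dependency for Gemini support, at the cost of the package having to track upstream fixes to the Bard client itself.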