webscout-7.3-py3-none-any.whl → webscout-7.5-py3-none-any.whl

This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.

Potentially problematic release.


This version of webscout might be problematic.

Files changed (62)
  1. webscout/Provider/AISEARCH/__init__.py +4 -3
  2. webscout/Provider/AISEARCH/genspark_search.py +208 -0
  3. webscout/Provider/AllenAI.py +282 -0
  4. webscout/Provider/C4ai.py +414 -0
  5. webscout/Provider/Cloudflare.py +18 -21
  6. webscout/Provider/DeepSeek.py +3 -32
  7. webscout/Provider/Deepinfra.py +52 -44
  8. webscout/Provider/ElectronHub.py +634 -0
  9. webscout/Provider/GithubChat.py +362 -0
  10. webscout/Provider/Glider.py +7 -41
  11. webscout/Provider/HeckAI.py +217 -0
  12. webscout/Provider/HuggingFaceChat.py +462 -0
  13. webscout/Provider/Jadve.py +49 -63
  14. webscout/Provider/Marcus.py +7 -50
  15. webscout/Provider/Netwrck.py +6 -53
  16. webscout/Provider/PI.py +106 -93
  17. webscout/Provider/Perplexitylabs.py +395 -0
  18. webscout/Provider/Phind.py +29 -3
  19. webscout/Provider/QwenLM.py +7 -61
  20. webscout/Provider/TTI/__init__.py +1 -0
  21. webscout/Provider/TTI/aiarta/__init__.py +2 -0
  22. webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
  23. webscout/Provider/TTI/aiarta/sync_aiarta.py +409 -0
  24. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  25. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  26. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  27. webscout/Provider/TextPollinationsAI.py +3 -2
  28. webscout/Provider/TwoAI.py +200 -0
  29. webscout/Provider/Venice.py +200 -0
  30. webscout/Provider/WiseCat.py +1 -18
  31. webscout/Provider/Youchat.py +1 -1
  32. webscout/Provider/__init__.py +25 -2
  33. webscout/Provider/akashgpt.py +315 -0
  34. webscout/Provider/chatglm.py +5 -5
  35. webscout/Provider/copilot.py +416 -0
  36. webscout/Provider/flowith.py +181 -0
  37. webscout/Provider/freeaichat.py +251 -221
  38. webscout/Provider/granite.py +17 -53
  39. webscout/Provider/koala.py +9 -1
  40. webscout/Provider/llamatutor.py +6 -46
  41. webscout/Provider/llmchat.py +7 -46
  42. webscout/Provider/multichat.py +29 -91
  43. webscout/Provider/yep.py +4 -24
  44. webscout/exceptions.py +19 -9
  45. webscout/update_checker.py +55 -93
  46. webscout/version.py +1 -1
  47. webscout-7.5.dist-info/LICENSE.md +146 -0
  48. {webscout-7.3.dist-info → webscout-7.5.dist-info}/METADATA +46 -172
  49. {webscout-7.3.dist-info → webscout-7.5.dist-info}/RECORD +52 -42
  50. webscout/Local/__init__.py +0 -10
  51. webscout/Local/_version.py +0 -3
  52. webscout/Local/formats.py +0 -747
  53. webscout/Local/model.py +0 -1368
  54. webscout/Local/samplers.py +0 -125
  55. webscout/Local/thread.py +0 -539
  56. webscout/Local/ui.py +0 -401
  57. webscout/Local/utils.py +0 -388
  58. webscout/Provider/dgaf.py +0 -214
  59. webscout-7.3.dist-info/LICENSE.md +0 -211
  60. {webscout-7.3.dist-info → webscout-7.5.dist-info}/WHEEL +0 -0
  61. {webscout-7.3.dist-info → webscout-7.5.dist-info}/entry_points.txt +0 -0
  62. {webscout-7.3.dist-info → webscout-7.5.dist-info}/top_level.txt +0 -0
webscout/Provider/TwoAI.py (new file)
@@ -0,0 +1,200 @@
+ import requests
+ import json
+ import os
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from webscout import LitAgent
+
+ class TwoAI(Provider):
+     """
+     A class to interact with the Two AI API, using a LitAgent user-agent.
+     """
+
+     AVAILABLE_MODELS = [
+         "sutra-light",
+     ]
+
+     def __init__(
+         self,
+         api_key: str = None,
+         is_conversation: bool = True,
+         max_tokens: int = 1024,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "sutra-light",
+         temperature: float = 0.6,
+         system_message: str = "You are a helpful assistant."
+     ):
+         """Initializes the TwoAI API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+         self.url = "https://api.two.app/v1/sutra-light/completion"
+         self.headers = {
+             'User-Agent': LitAgent().random(),
+             'Accept': 'application/json',
+             'Content-Type': 'application/json',
+             'X-Session-Token': api_key,
+             'Origin': 'https://chat.two.ai',
+             'Referer': 'https://api.two.app/'
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.temperature = temperature
+         self.system_message = system_message
+
+         # Materialized as a tuple so the membership check in ask() works on
+         # every call (a bare generator would be exhausted after one use).
+         self.__available_optimizers = tuple(
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = True,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         online_search: bool = True,
+         reasoning_on: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Payload construction
+         payload = {
+             "messages": [
+                 {"role": "system", "content": self.system_message},
+                 {"role": "user", "content": conversation_prompt},
+             ],
+             "model": self.model,
+             "temperature": self.temperature,
+             "max_tokens": self.max_tokens_to_sample,
+             "reasoningOn": reasoning_on,
+             "onlineSearch": online_search
+         }
+
+         def for_stream():
+             try:
+                 with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as response:
+                     if response.status_code != 200:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Request failed with status code {response.status_code}"
+                         )
+
+                     streaming_text = ""
+                     for line in response.iter_lines(decode_unicode=True):
+                         if line:
+                             try:
+                                 chunk = json.loads(line)
+                                 if chunk.get("typeName") == "LLMChunk":
+                                     content = chunk.get("content", "")
+                                     streaming_text += content
+                                     resp = dict(text=content)
+                                     # Raw mode yields the plain text chunk.
+                                     yield content if raw else resp
+                             except json.JSONDecodeError:
+                                 continue
+
+                     self.last_response = {"text": streaming_text}
+                     self.conversation.update_chat_history(prompt, streaming_text)
+
+             except requests.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+         def for_non_stream():
+             streaming_text = ""
+             for resp in for_stream():
+                 # for_stream() yields str when raw=True, dict otherwise.
+                 streaming_text += resp if raw else resp["text"]
+             self.last_response = {"text": streaming_text}
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = True,
+         optimizer: str = None,
+         conversationally: bool = False,
+         online_search: bool = True,
+         reasoning_on: bool = False,
+     ) -> str:
+         def for_stream():
+             for response in self.ask(
+                 prompt,
+                 True,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+                 online_search=online_search,
+                 reasoning_on=reasoning_on
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                     online_search=online_search,
+                     reasoning_on=reasoning_on
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     from rich import print
+
+     api_key = ""
+
+     ai = TwoAI(
+         api_key=api_key,
+         timeout=60,
+         system_message="You are an intelligent AI assistant. Be concise and helpful."
+     )
+
+     response = ai.chat("666+444=?", stream=True, reasoning_on=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
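The new TwoAI provider above consumes a newline-delimited JSON stream and keeps only events whose typeName is "LLMChunk". A minimal standalone sketch of that parsing loop, using made-up sample lines (the real event shapes are not documented in this diff, so the "Done" event here is purely illustrative):

    import json

    # Hypothetical stream lines in the shape the provider's for_stream() expects.
    sample_lines = [
        '{"typeName": "LLMChunk", "content": "666 + 444 "}',
        '{"typeName": "LLMChunk", "content": "= 1110"}',
        '{"typeName": "Done"}',
    ]

    text = ""
    for line in sample_lines:
        try:
            event = json.loads(line)
        except json.JSONDecodeError:
            continue  # skip malformed or partial lines, as the provider does
        if event.get("typeName") == "LLMChunk":
            text += event.get("content", "")

    print(text)  # -> 666 + 444 = 1110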
webscout/Provider/Venice.py (new file)
@@ -0,0 +1,200 @@
+ import requests
+ import json
+ from typing import Generator, Dict, Any, List, Union
+ from uuid import uuid4
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout import LitAgent
+
+ class Venice(Provider):
+     """
+     A class to interact with the Venice AI API.
+     """
+
+     AVAILABLE_MODELS = [
+         "llama-3.3-70b",
+         "llama-3.2-3b-akash",
+         "qwen2dot5-coder-32b"
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2000,
+         timeout: int = 30,
+         temperature: float = 0.8,
+         top_p: float = 0.9,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "llama-3.3-70b",
+         system_prompt: str = "You are a helpful AI assistant."
+     ):
+         """Initialize Venice AI client"""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.api_endpoint = "https://venice.ai/api/inference/chat"
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.temperature = temperature
+         self.top_p = top_p
+         self.timeout = timeout
+         self.model = model
+         self.system_prompt = system_prompt
+         self.last_response = {}
+
+         # Headers for the request
+         self.headers = {
+             "User-Agent": LitAgent().random(),
+             "accept": "*/*",
+             "accept-language": "en-US,en;q=0.9",
+             "content-type": "application/json",
+             "origin": "https://venice.ai",
+             "referer": "https://venice.ai/chat/",
+             "sec-ch-ua": '"Google Chrome";v="133", "Chromium";v="133", "Not?A_Brand";v="24"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-origin"
+         }
+
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         # Materialized as a tuple so the membership check in ask() works on
+         # every call (a bare generator would be exhausted after one use).
+         self.__available_optimizers = tuple(
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Payload construction
+         payload = {
+             "requestId": str(uuid4())[:7],
+             "modelId": self.model,
+             "prompt": [{"content": conversation_prompt, "role": "user"}],
+             "systemPrompt": self.system_prompt,
+             "conversationType": "text",
+             "temperature": self.temperature,
+             "webEnabled": True,
+             "topP": self.top_p,
+             "includeVeniceSystemPrompt": False,
+             "isCharacter": False,
+             "clientProcessingTime": 2000
+         }
+
+         def for_stream():
+             try:
+                 with self.session.post(
+                     self.api_endpoint,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout
+                 ) as response:
+                     if response.status_code != 200:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Request failed with status code {response.status_code}"
+                         )
+
+                     streaming_text = ""
+                     for line in response.iter_lines():
+                         if not line:
+                             continue
+
+                         try:
+                             # Decode bytes to string
+                             line_data = line.decode('utf-8').strip()
+                             if '"kind":"content"' in line_data:
+                                 data = json.loads(line_data)
+                                 if 'content' in data:
+                                     content = data['content']
+                                     streaming_text += content
+                                     resp = dict(text=content)
+                                     # Raw mode yields the plain text chunk.
+                                     yield content if raw else resp
+                         except json.JSONDecodeError:
+                             continue
+                         except UnicodeDecodeError:
+                             continue
+
+                     self.conversation.update_chat_history(prompt, streaming_text)
+
+             except requests.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+         def for_non_stream():
+             full_text = ""
+             for chunk in for_stream():
+                 # for_stream() yields str when raw=True, dict otherwise.
+                 full_text += chunk if raw else chunk["text"]
+             return {"text": full_text}
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator]:
+         def for_stream():
+             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     from rich import print
+
+     # Initialize Venice AI
+     ai = Venice(model="qwen2dot5-coder-32b", timeout=50)
+
+     # Test chat (non-streaming; uncomment the loop below for streaming)
+     response = ai.chat("Write a short story about an AI assistant", stream=False)
+     print(response)
+     # for chunk in response:
+     #     print(chunk, end="", flush=True)
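Assuming the unauthenticated venice.ai endpoint still responds the way the code above expects, streaming usage mirrors the provider's own __main__ block; a short sketch, with the model name taken from AVAILABLE_MODELS above:

    from webscout.Provider import Venice

    ai = Venice(model="llama-3.3-70b", timeout=50)
    # chat(stream=True) returns a generator of text chunks.
    for chunk in ai.chat("Explain HTTP chunked transfer in one line", stream=True):
        print(chunk, end="", flush=True)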
webscout/Provider/WiseCat.py
@@ -8,9 +8,6 @@ from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions
  from webscout import LitAgent
- from webscout import Logger
- from webscout import LogFormat
-


  class WiseCat(Provider):
@@ -36,8 +33,7 @@ class WiseCat(Provider):
          history_offset: int = 10250,
          act: str = None,
          model: str = "chat-model-large",
-         system_prompt: str = "You are a helpful AI assistant.",
-         logging: bool = False,
+         system_prompt: str = "You are a helpful AI assistant."
      ):
          """Initializes the WiseCat API client."""

@@ -61,9 +57,6 @@ class WiseCat(Provider):
          self.session.headers.update(self.headers)
          self.session.proxies = proxies

-         # Initialize logger
-         self.logger = Logger(name="WISECAT", format=LogFormat.MODERN_EMOJI) if logging else None
-
          self.__available_optimizers = (
              method
              for method in dir(Optimizers)
@@ -90,9 +83,6 @@ class WiseCat(Provider):
          conversationally: bool = False,
      ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
          """Chat with AI"""
-         if self.logger:
-             self.logger.debug(f"ask() called with prompt: {prompt}")
-
          conversation_prompt = self.conversation.gen_complete_prompt(prompt)
          if optimizer:
              if optimizer in self.__available_optimizers:
@@ -100,8 +90,6 @@ class WiseCat(Provider):
                      conversation_prompt if conversationally else prompt
                  )
              else:
-                 if self.logger:
-                     self.logger.error(f"Invalid optimizer: {optimizer}")
                  raise Exception(
                      f"Optimizer is not one of {self.__available_optimizers}"
                  )
@@ -127,8 +115,6 @@ class WiseCat(Provider):
              )
              if not response.ok:
                  error_msg = f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 if self.logger:
-                     self.logger.error(error_msg)
                  raise exceptions.FailedToGenerateResponseError(error_msg)

              streaming_response = ""
@@ -159,9 +145,6 @@ class WiseCat(Provider):
          conversationally: bool = False,
      ) -> str:
          """Generate response `str`"""
-         if self.logger:
-             self.logger.debug(f"chat() called with prompt: {prompt}")
-
          def for_stream():
              for response in self.ask(
                  prompt, True, optimizer=optimizer, conversationally=conversationally
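Dropping the logging keyword from WiseCat's __init__ is a breaking change for any caller that passed it; under 7.5 the old call now fails at construction time. A minimal illustration (assuming webscout 7.5 is installed):

    from webscout.Provider import WiseCat

    ai = WiseCat()              # works in both 7.3 and 7.5
    # WiseCat(logging=True)     # accepted in 7.3; raises TypeError in 7.5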
webscout/Provider/Youchat.py
@@ -264,6 +264,6 @@ class YouChat(Provider):
  if __name__ == '__main__':
      from rich import print
      ai = YouChat(timeout=5000)
-     response = ai.chat("hi", stream=True)
+     response = ai.chat(input(">>> "), stream=True)
      for chunk in response:
          print(chunk, end="", flush=True)
webscout/Provider/__init__.py
@@ -63,21 +63,42 @@ from .chatglm import *
  from .hermes import *
  from .TextPollinationsAI import *
  from .Glider import *
- from .dgaf import *
  from .ChatGPTGratis import *
  from .QwenLM import *
  from .granite import *
  from .WiseCat import *
  from .DeepSeek import *
  from .freeaichat import FreeAIChat
+ from .akashgpt import *
+ from .Perplexitylabs import *
+ from .AllenAI import *
+ from .HeckAI import *
+ from .TwoAI import *
+ from .Venice import *
+ from .ElectronHub import *
+ from .HuggingFaceChat import *
+ from .GithubChat import *
+ from .copilot import *
+ from .C4ai import *
+ from .flowith import *
  __all__ = [
      'LLAMA',
+     'Flowith',
+     'C4ai',
+     'Venice',
+     'Copilot',
+     'HuggingFaceChat',
+     'TwoAI',
+     'HeckAI',
+     'AllenAI',
+     'PerplexityLabs',
+     'AkashGPT',
      'DeepSeek',
      'WiseCat',
      'IBMGranite',
      'QwenLM',
      'ChatGPTGratis',
-     'DGAFAI',
+
      'TextPollinationsAI',
      'GliderAI',
      'Cohere',
@@ -143,4 +164,6 @@ __all__ = [
      'ChatGLM',
      'NousHermes',
      'FreeAIChat',
+     'ElectronHub',
+     'GithubChat',
  ]
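With these re-exports in place, the new providers should resolve from webscout.Provider directly; a quick smoke-test sketch (attributes other than those visible in this diff are guarded with getattr, since they are not documented here):

    from webscout.Provider import TwoAI, Venice, HeckAI, GithubChat

    # Confirm the names newly added to __all__ in 7.5 import cleanly.
    for cls in (TwoAI, Venice, HeckAI, GithubChat):
        print(cls.__name__, "->", getattr(cls, "AVAILABLE_MODELS", "n/a"))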