webscout-7.2-py3-none-any.whl → webscout-7.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of webscout has been flagged as a potentially problematic release.
Files changed (47)
  1. webscout/Bard.py +2 -2
  2. webscout/Litlogger/core/level.py +3 -0
  3. webscout/Litlogger/core/logger.py +101 -58
  4. webscout/Litlogger/handlers/console.py +14 -31
  5. webscout/Litlogger/handlers/network.py +16 -17
  6. webscout/Litlogger/styles/colors.py +81 -63
  7. webscout/Litlogger/styles/formats.py +163 -80
  8. webscout/Provider/AISEARCH/ISou.py +277 -0
  9. webscout/Provider/AISEARCH/__init__.py +4 -2
  10. webscout/Provider/AISEARCH/genspark_search.py +208 -0
  11. webscout/Provider/AllenAI.py +282 -0
  12. webscout/Provider/Deepinfra.py +52 -37
  13. webscout/Provider/ElectronHub.py +634 -0
  14. webscout/Provider/Glider.py +7 -41
  15. webscout/Provider/HeckAI.py +200 -0
  16. webscout/Provider/Jadve.py +49 -63
  17. webscout/Provider/PI.py +106 -93
  18. webscout/Provider/Perplexitylabs.py +395 -0
  19. webscout/Provider/QwenLM.py +7 -61
  20. webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
  21. webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -0
  22. webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -0
  23. webscout/Provider/TTI/__init__.py +3 -1
  24. webscout/Provider/TTI/piclumen/__init__.py +23 -0
  25. webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
  26. webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
  27. webscout/Provider/TextPollinationsAI.py +28 -6
  28. webscout/Provider/TwoAI.py +200 -0
  29. webscout/Provider/Venice.py +200 -0
  30. webscout/Provider/WiseCat.py +1 -18
  31. webscout/Provider/__init__.py +14 -0
  32. webscout/Provider/akashgpt.py +312 -0
  33. webscout/Provider/chatglm.py +5 -5
  34. webscout/Provider/freeaichat.py +251 -0
  35. webscout/Provider/koala.py +9 -1
  36. webscout/Provider/yep.py +5 -25
  37. webscout/__init__.py +1 -0
  38. webscout/version.py +1 -1
  39. webscout/webscout_search.py +82 -2
  40. webscout/webscout_search_async.py +58 -1
  41. webscout/yep_search.py +297 -0
  42. {webscout-7.2.dist-info → webscout-7.4.dist-info}/METADATA +99 -65
  43. {webscout-7.2.dist-info → webscout-7.4.dist-info}/RECORD +47 -30
  44. {webscout-7.2.dist-info → webscout-7.4.dist-info}/WHEEL +1 -1
  45. {webscout-7.2.dist-info → webscout-7.4.dist-info}/LICENSE.md +0 -0
  46. {webscout-7.2.dist-info → webscout-7.4.dist-info}/entry_points.txt +0 -0
  47. {webscout-7.2.dist-info → webscout-7.4.dist-info}/top_level.txt +0 -0
webscout/Provider/TwoAI.py (new file)
@@ -0,0 +1,200 @@
+ import requests
+ import json
+ import os
+ from typing import Any, Dict, Optional, Generator, Union
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider, AsyncProvider
+ from webscout import exceptions
+ from webscout import LitAgent
+
+ class TwoAI(Provider):
+     """
+     A class to interact with the Two AI API with LitAgent user-agent.
+     """
+
+     AVAILABLE_MODELS = [
+         "sutra-light",
+     ]
+
+     def __init__(
+         self,
+         api_key: str = None,
+         is_conversation: bool = True,
+         max_tokens: int = 1024,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "sutra-light",
+         temperature: float = 0.6,
+         system_message: str = "You are a helpful assistant."
+     ):
+         """Initializes the TwoAI API client."""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+         self.url = "https://api.two.app/v1/sutra-light/completion"
+         self.headers = {
+             'User-Agent': LitAgent().random(),
+             'Accept': 'application/json',
+             'Content-Type': 'application/json',
+             'X-Session-Token': api_key,
+             'Origin': 'https://chat.two.ai',
+             'Referer': 'https://api.two.app/'
+         }
+
+         self.session = requests.Session()
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.timeout = timeout
+         self.last_response = {}
+         self.model = model
+         self.temperature = temperature
+         self.system_message = system_message
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = True,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+         online_search: bool = True,
+         reasoning_on: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Payload construction
+         payload = {
+             "messages": [
+                 {"role": "system", "content": self.system_message},
+                 {"role": "user", "content": conversation_prompt},
+             ],
+             "model": self.model,
+             "temperature": self.temperature,
+             "max_tokens": self.max_tokens_to_sample,
+             "reasoningOn": reasoning_on,
+             "onlineSearch": online_search
+         }
+
+         def for_stream():
+             try:
+                 with self.session.post(self.url, json=payload, stream=True, timeout=self.timeout) as response:
+                     if response.status_code != 200:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Request failed with status code {response.status_code}"
+                         )
+
+                     streaming_text = ""
+                     for line in response.iter_lines(decode_unicode=True):
+                         if line:
+                             try:
+                                 chunk = json.loads(line)
+                                 if chunk["typeName"] == "LLMChunk":
+                                     content = chunk["content"]
+                                     streaming_text += content
+                                     resp = dict(text=content)
+                                     yield content if raw else resp  # raw mode yields plain text
+                             except json.JSONDecodeError:
+                                 continue
+
+                     self.last_response = {"text": streaming_text}
+                     self.conversation.update_chat_history(prompt, streaming_text)
+
+             except requests.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+         def for_non_stream():
+             streaming_text = ""
+             for resp in for_stream():
+                 streaming_text += resp["text"]
+             self.last_response = {"text": streaming_text}
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = True,
+         optimizer: str = None,
+         conversationally: bool = False,
+         online_search: bool = True,
+         reasoning_on: bool = False,
+     ) -> str:
+         def for_stream():
+             for response in self.ask(
+                 prompt,
+                 True,
+                 optimizer=optimizer,
+                 conversationally=conversationally,
+                 online_search=online_search,
+                 reasoning_on=reasoning_on
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                     online_search=online_search,
+                     reasoning_on=reasoning_on
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     from rich import print
+
+     api_key = ""
+
+     ai = TwoAI(
+         api_key=api_key,
+         timeout=60,
+         system_message="You are an intelligent AI assistant. Be concise and helpful."
+     )
+
+     response = ai.chat("666+444=?", stream=True, reasoning_on=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
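For readers skimming the diff: TwoAI's for_stream() assumes the endpoint emits newline-delimited JSON with a typeName discriminator. The standalone sketch below reproduces just that parsing step; the sample payloads (including the "Done" marker) are hypothetical, inferred from the chunk["typeName"] and chunk["content"] accesses above, not from any published API schema.

    import json

    # Hypothetical NDJSON lines, shaped like what for_stream() expects.
    sample_stream = [
        '{"typeName": "LLMChunk", "content": "666 + 444 = "}',
        '{"typeName": "LLMChunk", "content": "1110"}',
        '{"typeName": "Done"}',  # non-LLMChunk records are ignored
    ]

    text = ""
    for line in sample_stream:
        try:
            chunk = json.loads(line)
        except json.JSONDecodeError:
            continue  # malformed lines are skipped, as in for_stream()
        if chunk.get("typeName") == "LLMChunk":
            text += chunk["content"]

    print(text)  # -> 666 + 444 = 1110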
webscout/Provider/Venice.py (new file)
@@ -0,0 +1,200 @@
+ import requests
+ import json
+ from typing import Generator, Dict, Any, List, Union
+ from uuid import uuid4
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout import LitAgent
+
+ class Venice(Provider):
+     """
+     A class to interact with the Venice AI API.
+     """
+
+     AVAILABLE_MODELS = [
+         "llama-3.3-70b",
+         "llama-3.2-3b-akash",
+         "qwen2dot5-coder-32b",
+     ]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 2000,
+         timeout: int = 30,
+         temperature: float = 0.8,
+         top_p: float = 0.9,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         model: str = "llama-3.3-70b",
+         system_prompt: str = "You are a helpful AI assistant."
+     ):
+         """Initialize Venice AI client"""
+         if model not in self.AVAILABLE_MODELS:
+             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+         self.api_endpoint = "https://venice.ai/api/inference/chat"
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.temperature = temperature
+         self.top_p = top_p
+         self.timeout = timeout
+         self.model = model
+         self.system_prompt = system_prompt
+         self.last_response = {}
+
+         # Headers for the request
+         self.headers = {
+             "User-Agent": LitAgent().random(),
+             "accept": "*/*",
+             "accept-language": "en-US,en;q=0.9",
+             "content-type": "application/json",
+             "origin": "https://venice.ai",
+             "referer": "https://venice.ai/chat/",
+             "sec-ch-ua": '"Google Chrome";v="133", "Chromium";v="133", "Not?A_Brand";v="24"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-origin"
+         }
+
+         self.session.headers.update(self.headers)
+         self.session.proxies.update(proxies)
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[Dict[str, Any], Generator]:
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+         # Payload construction
+         payload = {
+             "requestId": str(uuid4())[:7],
+             "modelId": self.model,
+             "prompt": [{"content": conversation_prompt, "role": "user"}],
+             "systemPrompt": self.system_prompt,
+             "conversationType": "text",
+             "temperature": self.temperature,
+             "webEnabled": True,
+             "topP": self.top_p,
+             "includeVeniceSystemPrompt": False,
+             "isCharacter": False,
+             "clientProcessingTime": 2000
+         }
+
+         def for_stream():
+             try:
+                 with self.session.post(
+                     self.api_endpoint,
+                     json=payload,
+                     stream=True,
+                     timeout=self.timeout
+                 ) as response:
+                     if response.status_code != 200:
+                         raise exceptions.FailedToGenerateResponseError(
+                             f"Request failed with status code {response.status_code}"
+                         )
+
+                     streaming_text = ""
+                     for line in response.iter_lines():
+                         if not line:
+                             continue
+
+                         try:
+                             # Decode bytes to string
+                             line_data = line.decode('utf-8').strip()
+                             if '"kind":"content"' in line_data:
+                                 data = json.loads(line_data)
+                                 if 'content' in data:
+                                     content = data['content']
+                                     streaming_text += content
+                                     resp = dict(text=content)
+                                     yield content if raw else resp  # raw mode yields plain text
+                         except json.JSONDecodeError:
+                             continue
+                         except UnicodeDecodeError:
+                             continue
+
+                     # Record the full response so for_non_stream() can return it
+                     self.last_response = {"text": streaming_text}
+                     self.conversation.update_chat_history(prompt, streaming_text)
+
+             except requests.RequestException as e:
+                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator]:
+         def for_stream():
+             for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                 yield self.get_message(response)
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
+             )
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         return response["text"]
+
+ if __name__ == "__main__":
+     from rich import print
+
+     # Initialize Venice AI
+     ai = Venice(model="qwen2dot5-coder-32b", timeout=50)
+
+     # Test chat with streaming
+     response = ai.chat("Write a short story about an AI assistant", stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
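Venice's for_stream() filters raw bytes lines on the substring '"kind":"content"' before parsing them as JSON. The standalone sketch below replays that filtering step; the sample payloads (including the "kind":"meta" record) are hypothetical, inferred from the substring check and the data['content'] access above.

    import json

    # Hypothetical stream lines, shaped like what for_stream() expects.
    sample_lines = [
        b'{"kind":"meta","requestId":"abc1234"}',   # ignored: not a content chunk
        b'{"kind":"content","content":"Once upon"}',
        b'{"kind":"content","content":" a time..."}',
    ]

    text = ""
    for raw_line in sample_lines:
        line_data = raw_line.decode("utf-8").strip()
        if '"kind":"content"' not in line_data:
            continue  # only content chunks carry text
        try:
            data = json.loads(line_data)
        except json.JSONDecodeError:
            continue
        if "content" in data:
            text += data["content"]

    print(text)  # -> Once upon a time...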
webscout/Provider/WiseCat.py
@@ -8,9 +8,6 @@ from webscout.AIutel import AwesomePrompts
  from webscout.AIbase import Provider
  from webscout import exceptions
  from webscout import LitAgent
- from webscout import Logger
- from webscout import LogFormat
-


  class WiseCat(Provider):
@@ -36,8 +33,7 @@ class WiseCat(Provider):
          history_offset: int = 10250,
          act: str = None,
          model: str = "chat-model-large",
-         system_prompt: str = "You are a helpful AI assistant.",
-         logging: bool = False,
+         system_prompt: str = "You are a helpful AI assistant."
      ):
          """Initializes the WiseCat API client."""

@@ -61,9 +57,6 @@ class WiseCat(Provider):
          self.session.headers.update(self.headers)
          self.session.proxies = proxies

-         # Initialize logger
-         self.logger = Logger(name="WISECAT", format=LogFormat.MODERN_EMOJI) if logging else None
-
          self.__available_optimizers = (
              method
              for method in dir(Optimizers)
@@ -90,9 +83,6 @@ class WiseCat(Provider):
          conversationally: bool = False,
      ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
          """Chat with AI"""
-         if self.logger:
-             self.logger.debug(f"ask() called with prompt: {prompt}")
-
          conversation_prompt = self.conversation.gen_complete_prompt(prompt)
          if optimizer:
              if optimizer in self.__available_optimizers:
@@ -100,8 +90,6 @@ class WiseCat(Provider):
                      conversation_prompt if conversationally else prompt
                  )
              else:
-                 if self.logger:
-                     self.logger.error(f"Invalid optimizer: {optimizer}")
                  raise Exception(
                      f"Optimizer is not one of {self.__available_optimizers}"
                  )
@@ -127,8 +115,6 @@ class WiseCat(Provider):
              )
              if not response.ok:
                  error_msg = f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                 if self.logger:
-                     self.logger.error(error_msg)
                  raise exceptions.FailedToGenerateResponseError(error_msg)

              streaming_response = ""
@@ -159,9 +145,6 @@ class WiseCat(Provider):
          conversationally: bool = False,
      ) -> str:
          """Generate response `str`"""
-         if self.logger:
-             self.logger.debug(f"chat() called with prompt: {prompt}")
-
          def for_stream():
              for response in self.ask(
                  prompt, True, optimizer=optimizer, conversationally=conversationally
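The net effect of the WiseCat hunks above is the removal of the logging constructor parameter and the Logger/LogFormat plumbing behind it. A minimal before/after sketch (the 7.2 call in the comment is inferred from the removed logging: bool = False parameter):

    from webscout.Provider import WiseCat

    # webscout 7.2: WiseCat(system_prompt="...", logging=True)
    # webscout 7.4: the 'logging' kwarg is gone; passing it raises TypeError.
    ai = WiseCat(system_prompt="You are a helpful AI assistant.")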
webscout/Provider/__init__.py
@@ -69,8 +69,21 @@ from .QwenLM import *
  from .granite import *
  from .WiseCat import *
  from .DeepSeek import *
+ from .freeaichat import FreeAIChat
+ from .akashgpt import *
+ from .Perplexitylabs import *
+ from .AllenAI import *
+ from .HeckAI import *
+ from .TwoAI import *
+ from .Venice import *
  __all__ = [
      'LLAMA',
+     'Venice',
+     'TwoAI',
+     'HeckAI',
+     'AllenAI',
+     'PerplexityLabs',
+     'AkashGPT',
      'DeepSeek',
      'WiseCat',
      'IBMGranite',
@@ -141,4 +154,5 @@ __all__ = [
      'JadveOpenAI',
      'ChatGLM',
      'NousHermes',
+     'FreeAIChat',
  ]
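Taken together, the __init__.py hunks export the seven new providers from the package root. A minimal usage sketch (assuming webscout 7.4 is installed; the model name is the default from the Venice code above):

    from webscout.Provider import TwoAI, Venice, FreeAIChat  # new in 7.4

    ai = Venice(model="llama-3.3-70b", timeout=50)
    print(ai.chat("Hello!"))  # non-streaming chat returns a str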