webscout-7.0-py3-none-any.whl → webscout-7.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of webscout might be problematic.
- webscout/AIauto.py +191 -191
- webscout/AIbase.py +122 -122
- webscout/AIutel.py +440 -440
- webscout/Bard.py +343 -161
- webscout/DWEBS.py +489 -492
- webscout/Extra/YTToolkit/YTdownloader.py +995 -995
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +476 -479
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +103 -103
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder_utiles.py +199 -199
- webscout/Extra/autocoder/rawdog.py +5 -7
- webscout/Extra/autollama.py +230 -230
- webscout/Extra/gguf.py +3 -3
- webscout/Extra/weather.py +171 -171
- webscout/LLM.py +442 -442
- webscout/Litlogger/__init__.py +67 -681
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +20 -0
- webscout/Litlogger/core/logger.py +123 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +50 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +174 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +231 -0
- webscout/Litlogger/styles/formats.py +377 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +154 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AISEARCH/DeepFind.py +250 -250
- webscout/Provider/Blackboxai.py +136 -137
- webscout/Provider/ChatGPTGratis.py +226 -0
- webscout/Provider/Cloudflare.py +91 -78
- webscout/Provider/DeepSeek.py +218 -0
- webscout/Provider/Deepinfra.py +59 -35
- webscout/Provider/Free2GPT.py +131 -124
- webscout/Provider/Gemini.py +100 -115
- webscout/Provider/Glider.py +74 -59
- webscout/Provider/Groq.py +30 -18
- webscout/Provider/Jadve.py +108 -77
- webscout/Provider/Llama3.py +117 -94
- webscout/Provider/Marcus.py +191 -137
- webscout/Provider/Netwrck.py +62 -50
- webscout/Provider/PI.py +79 -124
- webscout/Provider/PizzaGPT.py +129 -83
- webscout/Provider/QwenLM.py +311 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
- webscout/Provider/TTI/Nexra/__init__.py +22 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
- webscout/Provider/TTI/artbit/__init__.py +22 -22
- webscout/Provider/TTI/artbit/async_artbit.py +184 -184
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
- webscout/Provider/TTI/blackbox/__init__.py +4 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
- webscout/Provider/TTI/deepinfra/__init__.py +4 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
- webscout/Provider/TTI/huggingface/__init__.py +22 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
- webscout/Provider/TTI/imgninza/__init__.py +4 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
- webscout/Provider/TTI/talkai/__init__.py +4 -4
- webscout/Provider/TTI/talkai/async_talkai.py +229 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
- webscout/Provider/TTS/deepgram.py +182 -182
- webscout/Provider/TTS/elevenlabs.py +136 -136
- webscout/Provider/TTS/gesserit.py +150 -150
- webscout/Provider/TTS/murfai.py +138 -138
- webscout/Provider/TTS/parler.py +133 -134
- webscout/Provider/TTS/streamElements.py +360 -360
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TTS/voicepod.py +116 -116
- webscout/Provider/TextPollinationsAI.py +74 -47
- webscout/Provider/WiseCat.py +193 -0
- webscout/Provider/__init__.py +144 -136
- webscout/Provider/cerebras.py +242 -227
- webscout/Provider/chatglm.py +204 -204
- webscout/Provider/dgaf.py +67 -39
- webscout/Provider/gaurish.py +105 -66
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +223 -0
- webscout/Provider/hermes.py +218 -218
- webscout/Provider/llama3mitril.py +179 -179
- webscout/Provider/llamatutor.py +72 -62
- webscout/Provider/llmchat.py +60 -35
- webscout/Provider/meta.py +794 -794
- webscout/Provider/multichat.py +331 -230
- webscout/Provider/typegpt.py +359 -356
- webscout/Provider/yep.py +5 -5
- webscout/__main__.py +5 -5
- webscout/cli.py +319 -319
- webscout/conversation.py +241 -242
- webscout/exceptions.py +328 -328
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +2 -3
- webscout/litprinter/__init__.py +0 -58
- webscout/scout/__init__.py +8 -8
- webscout/scout/core.py +884 -884
- webscout/scout/element.py +459 -459
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +38 -38
- webscout/swiftcli/__init__.py +811 -811
- webscout/update_checker.py +2 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +1142 -1140
- webscout/webscout_search_async.py +635 -635
- webscout/zeroart/__init__.py +54 -54
- webscout/zeroart/base.py +60 -60
- webscout/zeroart/effects.py +99 -99
- webscout/zeroart/fonts.py +816 -816
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/METADATA +21 -28
- webscout-7.2.dist-info/RECORD +217 -0
- webstoken/__init__.py +30 -30
- webstoken/classifier.py +189 -189
- webstoken/keywords.py +216 -216
- webstoken/language.py +128 -128
- webstoken/ner.py +164 -164
- webstoken/normalizer.py +35 -35
- webstoken/processor.py +77 -77
- webstoken/sentiment.py +206 -206
- webstoken/stemmer.py +73 -73
- webstoken/tagger.py +60 -60
- webstoken/tokenizer.py +158 -158
- webscout/Provider/RUBIKSAI.py +0 -272
- webscout-7.0.dist-info/RECORD +0 -199
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
--- a/webscout/Provider/TTS/voicepod.py
+++ b/webscout/Provider/TTS/voicepod.py
@@ -1,117 +1,117 @@
-import requests
-import json
-import time
-from pathlib import Path
-from typing import Generator
-from playsound import playsound
-from webscout import exceptions
-from webscout.AIbase import TTSProvider
-
-class Voicepods(TTSProvider):
-    """
-    A class to interact with the Voicepods text-to-speech API.
-    """
-
-    def __init__(self, timeout: int = 20, proxies: dict = None):
-        """
-        Initializes the Voicepods API client.
-        """
-        self.api_endpoint = "https://voicepods-stream.vercel.app/api/resemble"
-        self.headers = {
-            'Accept': '*/*',
-            'Accept-Encoding': 'gzip, deflate, br, zstd',
-            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
-            'Content-Type': 'application/json',
-            'DNT': '1',
-            'Origin': 'https://voicepods-stream.vercel.app',
-            'Referer': 'https://voicepods-stream.vercel.app/',
-            'Sec-CH-UA': '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
-            'Sec-CH-UA-Mobile': '?0',
-            'Sec-CH-UA-Platform': '"Windows"',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-origin',
-            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0',
-        }
-        self.session = requests.Session()
-        self.session.headers.update(self.headers)
-        if proxies:
-            self.session.proxies.update(proxies)
-        self.timeout = timeout
-        self.audio_cache_dir = Path("./audio_cache")
-
-    def tts(self, text: str) -> str:
-        """
-        Converts text to speech using the Voicepods API.
-
-        Args:
-            text (str): The text to be converted to speech.
-
-        Returns:
-            str: The filename of the saved audio file.
-
-        Raises:
-            exceptions.FailedToGenerateResponseError: If there is an error generating or saving the audio.
-        """
-        payload = json.dumps({"query": text})
-        filename = self.audio_cache_dir / f"{int(time.time())}.wav" # Using timestamp for filename
-
-        try:
-            response = self.session.post(self.api_endpoint, data=payload, timeout=self.timeout)
-            response.raise_for_status()
-
-            content_type = response.headers.get('Content-Type', '')
-            if 'audio' not in content_type.lower():
-                raise ValueError(f"Unexpected content type: {content_type}")
-
-            audio_data = response.content
-            self._save_audio(audio_data, filename)
-            return filename.as_posix() # Return the filename as a string
-
-        except requests.exceptions.RequestException as e:
-            raise exceptions.FailedToGenerateResponseError(f"Error generating audio: {e}")
-
-    def _save_audio(self, audio_data: bytes, filename: Path):
-        """Saves the audio data to a WAV file in the audio cache directory."""
-        try:
-            # Create the audio_cache directory if it doesn't exist
-            self.audio_cache_dir.mkdir(parents=True, exist_ok=True)
-
-            riff_start = audio_data.find(b'RIFF')
-            if riff_start == -1:
-                raise ValueError("RIFF header not found in audio data")
-
-            trimmed_audio_data = audio_data[riff_start:]
-
-            with open(filename, "wb") as f:
-                f.write(trimmed_audio_data)
-
-        except Exception as e:
-            raise exceptions.FailedToGenerateResponseError(f"Error saving audio: {e}")
-
-    def play_audio(self, filename: str):
-        """
-        Plays an audio file using playsound.
-
-        Args:
-            filename (str): The path to the audio file.
-
-        Raises:
-            RuntimeError: If there is an error playing the audio.
-        """
-        try:
-            playsound(filename)
-        except Exception as e:
-            raise RuntimeError(f"Error playing audio: {e}")
-
-# Example usage
-if __name__ == "__main__":
-
-    voicepods = Voicepods()
-    text = "Hello, this is a test of the Voicepods text-to-speech system."
-
-    print("Generating audio...")
-    audio_file = voicepods.tts(text)
-
-    print("Playing audio...")
+import requests
+import json
+import time
+from pathlib import Path
+from typing import Generator
+from playsound import playsound
+from webscout import exceptions
+from webscout.AIbase import TTSProvider
+
+class Voicepods(TTSProvider):
+    """
+    A class to interact with the Voicepods text-to-speech API.
+    """
+
+    def __init__(self, timeout: int = 20, proxies: dict = None):
+        """
+        Initializes the Voicepods API client.
+        """
+        self.api_endpoint = "https://voicepods-stream.vercel.app/api/resemble"
+        self.headers = {
+            'Accept': '*/*',
+            'Accept-Encoding': 'gzip, deflate, br, zstd',
+            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
+            'Content-Type': 'application/json',
+            'DNT': '1',
+            'Origin': 'https://voicepods-stream.vercel.app',
+            'Referer': 'https://voicepods-stream.vercel.app/',
+            'Sec-CH-UA': '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+            'Sec-CH-UA-Mobile': '?0',
+            'Sec-CH-UA-Platform': '"Windows"',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-origin',
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0',
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        if proxies:
+            self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.audio_cache_dir = Path("./audio_cache")
+
+    def tts(self, text: str) -> str:
+        """
+        Converts text to speech using the Voicepods API.
+
+        Args:
+            text (str): The text to be converted to speech.
+
+        Returns:
+            str: The filename of the saved audio file.
+
+        Raises:
+            exceptions.FailedToGenerateResponseError: If there is an error generating or saving the audio.
+        """
+        payload = json.dumps({"query": text})
+        filename = self.audio_cache_dir / f"{int(time.time())}.wav" # Using timestamp for filename
+
+        try:
+            response = self.session.post(self.api_endpoint, data=payload, timeout=self.timeout)
+            response.raise_for_status()
+
+            content_type = response.headers.get('Content-Type', '')
+            if 'audio' not in content_type.lower():
+                raise ValueError(f"Unexpected content type: {content_type}")
+
+            audio_data = response.content
+            self._save_audio(audio_data, filename)
+            return filename.as_posix() # Return the filename as a string
+
+        except requests.exceptions.RequestException as e:
+            raise exceptions.FailedToGenerateResponseError(f"Error generating audio: {e}")
+
+    def _save_audio(self, audio_data: bytes, filename: Path):
+        """Saves the audio data to a WAV file in the audio cache directory."""
+        try:
+            # Create the audio_cache directory if it doesn't exist
+            self.audio_cache_dir.mkdir(parents=True, exist_ok=True)
+
+            riff_start = audio_data.find(b'RIFF')
+            if riff_start == -1:
+                raise ValueError("RIFF header not found in audio data")
+
+            trimmed_audio_data = audio_data[riff_start:]
+
+            with open(filename, "wb") as f:
+                f.write(trimmed_audio_data)
+
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Error saving audio: {e}")
+
+    def play_audio(self, filename: str):
+        """
+        Plays an audio file using playsound.
+
+        Args:
+            filename (str): The path to the audio file.
+
+        Raises:
+            RuntimeError: If there is an error playing the audio.
+        """
+        try:
+            playsound(filename)
+        except Exception as e:
+            raise RuntimeError(f"Error playing audio: {e}")
+
+# Example usage
+if __name__ == "__main__":
+
+    voicepods = Voicepods()
+    text = "Hello, this is a test of the Voicepods text-to-speech system."
+
+    print("Generating audio...")
+    audio_file = voicepods.tts(text)
+
+    print("Playing audio...")
     voicepods.play_audio(audio_file)
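
For reference, a minimal caller sketch for the Voicepods provider above (hypothetical usage code, not part of the diff; the import path is assumed from the wheel's file layout, webscout/Provider/TTS/voicepod.py):

    # Hypothetical usage sketch; import path assumed from the file layout
    from webscout.Provider.TTS.voicepod import Voicepods

    tts = Voicepods(timeout=20)
    # tts() POSTs the text to the Voicepods endpoint and writes ./audio_cache/<timestamp>.wav
    wav_path = tts.tts("Testing the Voicepods provider.")
    tts.play_audio(wav_path)  # blocks until playsound finishes playback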
--- a/webscout/Provider/TextPollinationsAI.py
+++ b/webscout/Provider/TextPollinationsAI.py
@@ -1,41 +1,28 @@
+
 import requests
 import json
 from typing import Any, Dict, Generator
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
-
-
+from webscout.Litlogger import Logger, LogFormat
+from webscout import LitAgent as Lit
 class TextPollinationsAI(Provider):
     """
-    A class to interact with the Pollinations AI API.
+    A class to interact with the Pollinations AI API with comprehensive logging.
     """
 
     AVAILABLE_MODELS = [
-        "openai",
-        "
-        "
-        "
-        "llama",
-        "mistral",
-        "unity",
-        "midijourney",
-        "rtist",
-        "searchgpt",
-        "evil",
-        "deepseek",
-        "claude-hybridspace",
-        "deepseek-r1",
-        "llamalight"
+        "openai", "openai-large", "qwen", "qwen-coder", "llama", "mistral",
+        "unity", "midijourney", "rtist", "searchgpt", "evil", "deepseek",
+        "claude-hybridspace", "deepseek-r1", "llamalight", "llamaguard",
+        "gemini", "gemini-thinking", "hormoz"
     ]
 
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int =
+        max_tokens: int = 8096,
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -43,13 +30,22 @@ class TextPollinationsAI(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        model: str = "openai",
+        model: str = "openai-large",
         system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False
     ):
-        """Initializes the TextPollinationsAI API client."""
+        """Initializes the TextPollinationsAI API client with logging capabilities."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
 
+        self.logger = Logger(
+            name="TextPollinationsAI",
+            format=LogFormat.MODERN_EMOJI,
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info(f"Initializing TextPollinationsAI with model: {model}")
+
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -59,20 +55,22 @@ class TextPollinationsAI(Provider):
         self.last_response = {}
         self.model = model
         self.system_prompt = system_prompt
+
         self.headers = {
             'Accept': '*/*',
             'Accept-Language': 'en-US,en;q=0.9',
-            'User-Agent':
+            'User-Agent': Lit().random(),
             'Content-Type': 'application/json',
         }
+
         self.session.headers.update(self.headers)
         self.session.proxies = proxies
 
         self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
+            method for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
+
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -80,11 +78,15 @@ class TextPollinationsAI(Provider):
             if act
             else intro or Conversation.intro
         )
+
         self.conversation = Conversation(
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
 
+        if self.logger:
+            self.logger.info("TextPollinationsAI initialized successfully")
+
     def ask(
         self,
         prompt: str,
@@ -93,26 +95,23 @@ class TextPollinationsAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
-        """Chat with AI
-
-
-            stream
-
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            Union[Dict, Generator[Dict, None, None]]: Response generated
-        """
+        """Chat with AI with logging capabilities"""
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
             if optimizer in self.__available_optimizers:
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-
-                    f"
-                )
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
 
         payload = {
             "messages": [
@@ -124,26 +123,40 @@ class TextPollinationsAI(Provider):
         }
 
         def for_stream():
+            if self.logger:
+                self.logger.debug("Initiating streaming request to API")
+
             response = self.session.post(
-                self.api_endpoint,
+                self.api_endpoint,
+                headers=self.headers,
+                json=payload,
+                stream=True,
+                timeout=self.timeout
             )
+
             if not response.ok:
+                if self.logger:
+                    self.logger.error(f"API request failed. Status: {response.status_code}, Reason: {response.reason}")
                 raise exceptions.FailedToGenerateResponseError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
+
+            if self.logger:
+                self.logger.info(f"API connection established successfully. Status: {response.status_code}")
+
             full_response = ""
             for line in response.iter_lines():
                 if line:
                     line = line.decode('utf-8').strip()
-                    # Break if the stream signals completion
                     if line == "data: [DONE]":
+                        if self.logger:
+                            self.logger.debug("Stream completed")
                         break
                     if line.startswith('data: '):
                         try:
                             json_data = json.loads(line[6:])
                             if 'choices' in json_data and len(json_data['choices']) > 0:
                                 choice = json_data['choices'][0]
-                                # Handle delta responses from streaming output
                                 if 'delta' in choice and 'content' in choice['delta']:
                                     content = choice['delta']['content']
                                 else:
@@ -151,13 +164,21 @@ class TextPollinationsAI(Provider):
                                 full_response += content
                                 yield content if raw else dict(text=content)
                         except json.JSONDecodeError as e:
-
+                            if self.logger:
+                                self.logger.error(f"JSON parsing error: {str(e)}")
+                            continue
+
             self.last_response.update(dict(text=full_response))
             self.conversation.update_chat_history(
                 prompt, self.get_message(self.last_response)
             )
 
+            if self.logger:
+                self.logger.debug("Response processing completed")
+
         def for_non_stream():
+            if self.logger:
+                self.logger.debug("Processing non-streaming request")
             for _ in for_stream():
                 pass
             return self.last_response
@@ -171,12 +192,16 @@ class TextPollinationsAI(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> str | Generator[str, None, None]:
-        """Generate response as a string"""
+        """Generate response as a string with logging"""
+        if self.logger:
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
+
         def for_stream():
             for response in self.ask(
                 prompt, True, optimizer=optimizer, conversationally=conversationally
             ):
                 yield self.get_message(response)
+
         def for_non_stream():
             return self.get_message(
                 self.ask(
@@ -186,6 +211,7 @@ class TextPollinationsAI(Provider):
                     conversationally=conversationally,
                 )
             )
+
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
@@ -195,7 +221,8 @@ class TextPollinationsAI(Provider):
 
 if __name__ == "__main__":
     from rich import print
-
+    # Enable logging for testing
+    ai = TextPollinationsAI(model="deepseek-r1", logging=True)
     response = ai.chat(input(">>> "), stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
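
The user-facing changes in this file are the expanded AVAILABLE_MODELS list, the new default model "openai-large", a max_tokens default of 8096, a randomized User-Agent via LitAgent, and an opt-in logging flag. A minimal sketch of the new flag (hypothetical usage code; the import path is assumed from the file layout):

    # Hypothetical usage sketch of the 7.2 logging flag; import path assumed
    from webscout.Provider.TextPollinationsAI import TextPollinationsAI

    ai = TextPollinationsAI(model="openai-large", logging=True)  # logging=False keeps the old silent behaviour
    for chunk in ai.chat("Hello!", stream=True):  # chat(stream=True) yields plain str chunks
        print(chunk, end="", flush=True)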
--- /dev/null
+++ b/webscout/Provider/WiseCat.py
@@ -0,0 +1,193 @@
+import requests
+import json
+from typing import Any, Dict, Generator, Optional
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout import LitAgent
+from webscout import Logger
+from webscout import LogFormat
+
+
+
+class WiseCat(Provider):
+    """
+    A class to interact with the WiseCat API.
+    """
+
+    AVAILABLE_MODELS = [
+        "chat-model-small",
+        "chat-model-large",
+        "chat-model-reasoning",
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "chat-model-large",
+        system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False,
+    ):
+        """Initializes the WiseCat API client."""
+
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://wise-cat-groq.vercel.app/api/chat"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.headers = {
+            "Content-Type": "application/json",
+            "Accept": "*/*",
+            "User-Agent": LitAgent().random()
+        }
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+
+        # Initialize logger
+        self.logger = Logger(name="WISECAT", format=LogFormat.MODERN_EMOJI) if logging else None
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+        """Chat with AI"""
+        if self.logger:
+            self.logger.debug(f"ask() called with prompt: {prompt}")
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer: {optimizer}")
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "id": "ephemeral",
+            "messages": [
+                {
+                    "role": "system",
+                    "content": self.system_prompt
+                },
+                {
+                    "role": "user",
+                    "content": conversation_prompt,
+                }
+            ],
+            "selectedChatModel": self.model
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
+            )
+            if not response.ok:
+                error_msg = f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                if self.logger:
+                    self.logger.error(error_msg)
+                raise exceptions.FailedToGenerateResponseError(error_msg)
+
+            streaming_response = ""
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    if line.startswith("0:"):
+                        content = line[2:].strip('"')
+                        streaming_response += content
+                        yield content if raw else dict(text=content)
+
+            self.last_response.update(dict(text=streaming_response))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`"""
+        if self.logger:
+            self.logger.debug(f"chat() called with prompt: {prompt}")
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response"""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    from rich import print
+    ai = WiseCat()
+    response = ai.chat(input(">>> "))
+    for chunk in response:
+        print(chunk, end="", flush=True)
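
WiseCat.py is entirely new in 7.2: a provider that streams completions from the hard-coded endpoint https://wise-cat-groq.vercel.app/api/chat. A minimal caller sketch (hypothetical usage code; the import path is assumed from the file layout):

    # Hypothetical usage sketch of the new WiseCat provider; import path assumed
    from webscout.Provider.WiseCat import WiseCat

    ai = WiseCat(model="chat-model-reasoning", logging=True)
    reply = ai.chat("What is 2 + 2?")  # stream=False returns the full reply as a str
    print(reply)

Note that the module's own __main__ demo calls chat() without stream=True and then iterates the returned value, which for a plain str prints it character by character; passing stream=True yields proper chunks instead.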