webscout-7.1-py3-none-any.whl → webscout-7.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +191 -191
- webscout/AIbase.py +122 -122
- webscout/AIutel.py +440 -440
- webscout/Bard.py +343 -161
- webscout/DWEBS.py +489 -492
- webscout/Extra/YTToolkit/YTdownloader.py +995 -995
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +476 -479
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +103 -103
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder_utiles.py +199 -199
- webscout/Extra/autocoder/rawdog.py +5 -7
- webscout/Extra/autollama.py +230 -230
- webscout/Extra/gguf.py +3 -3
- webscout/Extra/weather.py +171 -171
- webscout/LLM.py +442 -442
- webscout/Litlogger/__init__.py +67 -681
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +20 -0
- webscout/Litlogger/core/logger.py +123 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +50 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +174 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +231 -0
- webscout/Litlogger/styles/formats.py +377 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +154 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AISEARCH/DeepFind.py +250 -250
- webscout/Provider/Blackboxai.py +3 -3
- webscout/Provider/ChatGPTGratis.py +226 -0
- webscout/Provider/Cloudflare.py +3 -4
- webscout/Provider/DeepSeek.py +218 -0
- webscout/Provider/Deepinfra.py +3 -3
- webscout/Provider/Free2GPT.py +131 -124
- webscout/Provider/Gemini.py +100 -115
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/Groq.py +5 -1
- webscout/Provider/Jadve.py +3 -3
- webscout/Provider/Marcus.py +191 -192
- webscout/Provider/Netwrck.py +3 -3
- webscout/Provider/PI.py +2 -2
- webscout/Provider/PizzaGPT.py +2 -3
- webscout/Provider/QwenLM.py +311 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
- webscout/Provider/TTI/Nexra/__init__.py +22 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
- webscout/Provider/TTI/artbit/__init__.py +22 -22
- webscout/Provider/TTI/artbit/async_artbit.py +184 -184
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
- webscout/Provider/TTI/blackbox/__init__.py +4 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
- webscout/Provider/TTI/deepinfra/__init__.py +4 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
- webscout/Provider/TTI/huggingface/__init__.py +22 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
- webscout/Provider/TTI/imgninza/__init__.py +4 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
- webscout/Provider/TTI/talkai/__init__.py +4 -4
- webscout/Provider/TTI/talkai/async_talkai.py +229 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
- webscout/Provider/TTS/deepgram.py +182 -182
- webscout/Provider/TTS/elevenlabs.py +136 -136
- webscout/Provider/TTS/gesserit.py +150 -150
- webscout/Provider/TTS/murfai.py +138 -138
- webscout/Provider/TTS/parler.py +133 -134
- webscout/Provider/TTS/streamElements.py +360 -360
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TTS/voicepod.py +116 -116
- webscout/Provider/TextPollinationsAI.py +2 -3
- webscout/Provider/WiseCat.py +193 -0
- webscout/Provider/__init__.py +144 -134
- webscout/Provider/cerebras.py +242 -227
- webscout/Provider/chatglm.py +204 -204
- webscout/Provider/dgaf.py +2 -3
- webscout/Provider/gaurish.py +2 -3
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +223 -0
- webscout/Provider/hermes.py +218 -218
- webscout/Provider/llama3mitril.py +179 -179
- webscout/Provider/llamatutor.py +3 -3
- webscout/Provider/llmchat.py +2 -3
- webscout/Provider/meta.py +794 -794
- webscout/Provider/multichat.py +331 -331
- webscout/Provider/typegpt.py +359 -359
- webscout/Provider/yep.py +2 -2
- webscout/__main__.py +5 -5
- webscout/cli.py +319 -319
- webscout/conversation.py +241 -242
- webscout/exceptions.py +328 -328
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +2 -3
- webscout/litprinter/__init__.py +0 -58
- webscout/scout/__init__.py +8 -8
- webscout/scout/core.py +884 -884
- webscout/scout/element.py +459 -459
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +38 -38
- webscout/swiftcli/__init__.py +811 -811
- webscout/update_checker.py +2 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +5 -4
- webscout/zeroart/__init__.py +54 -54
- webscout/zeroart/base.py +60 -60
- webscout/zeroart/effects.py +99 -99
- webscout/zeroart/fonts.py +816 -816
- {webscout-7.1.dist-info → webscout-7.2.dist-info}/METADATA +4 -3
- webscout-7.2.dist-info/RECORD +217 -0
- webstoken/__init__.py +30 -30
- webstoken/classifier.py +189 -189
- webstoken/keywords.py +216 -216
- webstoken/language.py +128 -128
- webstoken/ner.py +164 -164
- webstoken/normalizer.py +35 -35
- webstoken/processor.py +77 -77
- webstoken/sentiment.py +206 -206
- webstoken/stemmer.py +73 -73
- webstoken/tagger.py +60 -60
- webstoken/tokenizer.py +158 -158
- webscout-7.1.dist-info/RECORD +0 -198
- {webscout-7.1.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
- {webscout-7.1.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
- {webscout-7.1.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
- {webscout-7.1.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
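
One notable change in 7.2: Litlogger is rebuilt from a single 681-line `__init__.py` into `core/`, `handlers/`, `styles/`, and `utils/` submodules, and the providers switch to importing `Logger` and `LogFormat` directly (see the Cloudflare and Deepinfra hunks below). A minimal sketch of the call pattern the updated providers rely on; only `Logger(name=..., format=LogFormat.MODERN_EMOJI)` and the `info`/`debug`/`error` methods are visible in these diffs, so anything beyond that would be an assumption:

# Logging surface used by the 7.2 providers (sketch, not the full Litlogger API).
from webscout.Litlogger import Logger, LogFormat

logger = Logger(name="MyProvider", format=LogFormat.MODERN_EMOJI)
logger.info("Provider initialized")      # startup messages
logger.debug("Processing request ...")   # per-request tracing
logger.error("Request failed")           # failure paths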
webscout/Provider/ChatGPTGratis.py
ADDED
@@ -0,0 +1,226 @@
+from typing import Any, Dict, Generator, Optional
+import requests
+import json
+
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.Litlogger import Logger, LogFormat
+from webscout import LitAgent as Lit
+
+
+class ChatGPTGratis(Provider):
+    """
+    A class to interact with the chatgptgratis.eu backend API with logging and real-time streaming.
+    """
+    AVAILABLE_MODELS = [
+        "Meta-Llama-3.2-1B-Instruct",
+        "Meta-Llama-3.2-3B-Instruct",
+        "Meta-Llama-3.1-8B-Instruct",
+        "Meta-Llama-3.1-70B-Instruct",
+        "Meta-Llama-3.1-405B-Instruct",
+        "gpt4o"
+
+    ]
+
+    def __init__(
+        self,
+        model: str = "gpt4o",
+        timeout: int = 30,
+        logging: bool = False,
+        proxies: Optional[Dict[str, str]] = None,
+        intro: Optional[str] = None,
+        filepath: Optional[str] = None,
+        update_file: bool = True,
+        history_offset: int = 10250,
+        act: Optional[str] = None,
+    ) -> None:
+        """
+        Initializes the ChatGPTGratis.
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.logger = Logger(
+            name="ChatGPTGratis",
+            format=LogFormat.MODERN_EMOJI,
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info(f"Initializing ChatGPTGratis with model: {model}")
+
+        self.session = requests.Session()
+        self.timeout = timeout
+        self.api_endpoint = "https://chatgptgratis.eu/backend/chat.php"
+        self.model = model
+
+        # Set up headers similar to a browser request with dynamic User-Agent
+        self.headers = {
+            "Accept": "*/*",
+            "Content-Type": "application/json",
+            "Origin": "https://chatgptgratis.eu",
+            "Referer": "https://chatgptgratis.eu/chat.html",
+            "User-Agent": Lit().random(),
+        }
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies or {}
+
+        # Set up conversation history and prompts
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            True, 8096, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        if self.logger:
+            self.logger.info("ChatGPTGratis initialized successfully.")
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: Optional[str] = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any] | Generator[Dict[str, Any], None, None]:
+        """
+        Sends a request to the API and returns the response.
+        If stream is True, yields response chunks as they are received.
+        """
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            available_opts = (
+                method for method in dir(Optimizers)
+                if callable(getattr(Optimizers, method)) and not method.startswith("__")
+            )
+            if optimizer in available_opts:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
+            else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(f"Optimizer is not one of {list(available_opts)}")
+
+        payload = {
+            "message": conversation_prompt,
+            "model": self.model,
+
+        }
+
+        def for_stream() -> Generator[Dict[str, Any], None, None]:
+            if self.logger:
+                self.logger.debug("Initiating streaming request to API")
+            response = self.session.post(
+                self.api_endpoint,
+                json=payload,
+                stream=True,
+                timeout=self.timeout
+            )
+            if not response.ok:
+                if self.logger:
+                    self.logger.error(
+                        f"API request failed. Status: {response.status_code}, Reason: {response.reason}"
+                    )
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            if self.logger:
+                self.logger.info(f"API connection established. Status: {response.status_code}")
+
+            full_response = ""
+            for line in response.iter_lines():
+                if line:
+                    line_decoded = line.decode('utf-8').strip()
+                    if line_decoded == "data: [DONE]":
+                        if self.logger:
+                            self.logger.debug("Stream completed.")
+                        break
+                    if line_decoded.startswith("data: "):
+                        try:
+                            json_data = json.loads(line_decoded[6:])
+                            choices = json_data.get("choices", [])
+                            if choices and "delta" in choices[0]:
+                                content = choices[0]["delta"].get("content", "")
+                            else:
+                                content = ""
+                            full_response += content
+                            yield content if raw else {"text": content}
+                        except json.JSONDecodeError as e:
+                            if self.logger:
+                                self.logger.error(f"JSON parsing error: {str(e)}")
+                            continue
+            # Update last response and conversation history.
+            self.conversation.update_chat_history(prompt, self.get_message({"text": full_response}))
+            if self.logger:
+                self.logger.debug("Response processing completed.")
+
+        def for_non_stream() -> Dict[str, Any]:
+            if self.logger:
+                self.logger.debug("Processing non-streaming request")
+            collected = ""
+            for chunk in for_stream():
+                collected += chunk["text"] if isinstance(chunk, dict) else chunk
+            return {"text": collected}
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: Optional[str] = None,
+        conversationally: bool = False,
+    ) -> str | Generator[str, None, None]:
+        """
+        Returns the response as a string.
+        For streaming requests, yields each response chunk as a string.
+        """
+        if self.logger:
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
+
+        def stream_response() -> Generator[str, None, None]:
+            for response in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def non_stream_response() -> str:
+            return self.get_message(self.ask(
+                prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+            ))
+
+        return stream_response() if stream else non_stream_response()
+
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts and returns the text message from the response dictionary.
+        """
+        assert isinstance(response, dict), "Response must be a dictionary."
+        return response.get("text", "")
+
+
+if __name__ == "__main__":
+    from rich import print
+
+    # Create an instance of the ChatGPTGratis with logging enabled for testing.
+    client = ChatGPTGratis(
+        model="Meta-Llama-3.2-1B-Instruct",
+        logging=False
+    )
+    prompt_input = input(">>> ")
+    response = client.chat(prompt_input, stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
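
The `for_stream` loop above consumes an OpenAI-style server-sent-events stream: `data: `-prefixed JSON chunks carrying `choices[0].delta.content`, terminated by `data: [DONE]`. A self-contained sketch of that wire format and parse (the chunk payloads below are illustrative, not captured from the chatgptgratis.eu endpoint):

import json

# Illustrative SSE lines in the shape ChatGPTGratis.ask() expects;
# in the provider these arrive via response.iter_lines().
lines = [
    'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"choices": [{"delta": {"content": "lo"}}]}',
    'data: [DONE]',
]

full_response = ""
for line in lines:
    if line == "data: [DONE]":
        break                       # end-of-stream sentinel
    if line.startswith("data: "):
        chunk = json.loads(line[6:])
        delta = chunk["choices"][0].get("delta", {})
        full_response += delta.get("content", "")
print(full_response)  # -> Hello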
webscout/Provider/Cloudflare.py
CHANGED
@@ -9,12 +9,12 @@ from webscout import exceptions
 from typing import Any, AsyncGenerator, Dict
 import cloudscraper
 from webscout import LitAgent
-from webscout.Litlogger import
+from webscout.Litlogger import Logger, LogFormat

 class Cloudflare(Provider):
     """
     Cloudflare provider to interact with Cloudflare's text generation API.
-    Includes logging capabilities using
+    Includes logging capabilities using Logger and uses LitAgent for user-agent.
     """

     # Updated AVAILABLE_MODELS from given JSON data
@@ -145,10 +145,9 @@ class Cloudflare(Provider):
         self.conversation.history_offset = history_offset

         # Initialize logger if logging is enabled
-        self.logger =
+        self.logger = Logger(
             name="Cloudflare",
             format=LogFormat.MODERN_EMOJI,
-            color_scheme=ColorScheme.CYBERPUNK
         ) if logging else None

         if self.logger:
webscout/Provider/DeepSeek.py
ADDED
@@ -0,0 +1,218 @@
+
+import requests
+import json
+from typing import Any, Dict, Optional, Generator
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.Litlogger import Logger, LogFormat
+from webscout import LitAgent as Lit
+
+class DeepSeek(Provider):
+    """
+    A class to interact with the DeepSeek AI API.
+    """
+
+    AVAILABLE_MODELS = {
+        "deepseek-v3": "deepseek-v3",
+        "deepseek-r1": "deepseek-r1",
+        "deepseek-llm-67b-chat": "deepseek-llm-67b-chat"
+    }
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "deepseek-r1",  # Default model
+        system_prompt: str = "You are a helpful AI assistant.",
+        logging: bool = False
+    ):
+        """
+        Initializes the DeepSeek AI API with given parameters.
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS.keys()}")
+
+        # Initialize logging
+        self.logger = Logger(
+            name="DeepSeek",
+            format=LogFormat.MODERN_EMOJI,
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info(f"Initializing DeepSeek with model: {model}")
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://www.deepseekapp.io/v1/chat/completions"
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt
+        self.model = model
+        self.api_key = "skgadi_mare_2_seater"
+        self.headers = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {self.api_key}",
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """Chat with AI"""
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
+            else:
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer: {optimizer}")
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        messages = [
+            {"role": "system", "content": self.system_prompt},
+            {"role": "user", "content": conversation_prompt}
+        ]
+
+        payload = {
+            "model": self.model,
+            "messages": messages
+        }
+
+        def for_stream():
+            if self.logger:
+                self.logger.debug("Sending streaming request to DeepInfra API...")
+            try:
+                with requests.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
+                    if response.status_code != 200:
+                        if self.logger:
+                            self.logger.error(f"Request failed with status code {response.status_code}")
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Request failed with status code {response.status_code}"
+                        )
+
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            line = line.strip()
+                            if line.startswith("data: "):
+                                json_str = line[6:]  # Remove "data: " prefix
+                                if json_str == "[DONE]":
+                                    break
+                                try:
+                                    json_data = json.loads(json_str)
+                                    if 'choices' in json_data:
+                                        choice = json_data['choices'][0]
+                                        if 'delta' in choice and 'content' in choice['delta']:
+                                            content = choice['delta']['content']
+                                            streaming_text += content
+                                            resp = {"text": content}
+                                            yield resp if raw else resp
+                                except json.JSONDecodeError:
+                                    if self.logger:
+                                        self.logger.error("JSON decode error in streaming data")
+                                    continue
+
+                    self.last_response = {"text": streaming_text}
+                    self.conversation.update_chat_history(prompt, streaming_text)
+                    if self.logger:
+                        self.logger.info("Streaming response completed successfully")
+
+            except requests.RequestException as e:
+                if self.logger:
+                    self.logger.error(f"Request failed: {e}")
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response string"""
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response"""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    from rich import print
+
+    # Example usage
+    ai = DeepSeek(system_prompt="You are an expert AI assistant.", logging=True)
+
+    try:
+        # Send a prompt and stream the response
+        response = ai.chat("Write me a short poem about AI.", stream=True)
+        for chunk in response:
+            print(chunk, end="", flush=True)
+    except Exception as e:
+        print(f"Error: {e}")
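
DeepSeek funnels both modes through the same streaming core: `for_non_stream` simply drains `for_stream` and returns the accumulated `last_response` dict. A hedged usage sketch based only on the signatures above; the endpoint and the bundled API key are whatever this release ships, and nothing here is verified against a live service:

from webscout.Provider.DeepSeek import DeepSeek  # module added in 7.2

ai = DeepSeek(model="deepseek-v3", logging=False)

# Non-streaming: drains the SSE stream internally, returns a plain string.
reply = ai.chat("Summarize SSE in one sentence.", stream=False)
print(reply)

# ask() exposes the raw {"text": ...} dict form of the same response.
raw = ai.ask("Same question.", stream=False)
print(raw["text"])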
webscout/Provider/Deepinfra.py
CHANGED
@@ -9,7 +9,7 @@ from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
 from webscout import LitAgent
-from webscout.Litlogger import
+from webscout.Litlogger import Logger, LogFormat

 class DeepInfra(Provider):
     """
@@ -80,10 +80,10 @@ class DeepInfra(Provider):
         self.conversation.history_offset = history_offset

         # Initialize logger if enabled
-        self.logger =
+        self.logger = Logger(
             name="DeepInfra",
             format=LogFormat.MODERN_EMOJI,
-
+
         ) if logging else None

         if self.logger: