webscout 7.1__py3-none-any.whl → 7.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout has been flagged as potentially problematic; see the advisory on the package registry page for more details.
- webscout/AIauto.py +191 -191
- webscout/AIbase.py +122 -122
- webscout/AIutel.py +440 -440
- webscout/Bard.py +343 -161
- webscout/DWEBS.py +489 -492
- webscout/Extra/YTToolkit/YTdownloader.py +995 -995
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +476 -479
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +103 -103
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder_utiles.py +199 -199
- webscout/Extra/autocoder/rawdog.py +5 -7
- webscout/Extra/autollama.py +230 -230
- webscout/Extra/gguf.py +3 -3
- webscout/Extra/weather.py +171 -171
- webscout/LLM.py +442 -442
- webscout/Litlogger/__init__.py +67 -681
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +23 -0
- webscout/Litlogger/core/logger.py +166 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +33 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +173 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +249 -0
- webscout/Litlogger/styles/formats.py +460 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +154 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AISEARCH/DeepFind.py +250 -250
- webscout/Provider/AISEARCH/ISou.py +277 -0
- webscout/Provider/AISEARCH/__init__.py +2 -1
- webscout/Provider/Blackboxai.py +3 -3
- webscout/Provider/ChatGPTGratis.py +226 -0
- webscout/Provider/Cloudflare.py +3 -4
- webscout/Provider/DeepSeek.py +218 -0
- webscout/Provider/Deepinfra.py +40 -24
- webscout/Provider/Free2GPT.py +131 -124
- webscout/Provider/Gemini.py +100 -115
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/Groq.py +5 -1
- webscout/Provider/Jadve.py +3 -3
- webscout/Provider/Marcus.py +191 -192
- webscout/Provider/Netwrck.py +3 -3
- webscout/Provider/PI.py +2 -2
- webscout/Provider/PizzaGPT.py +2 -3
- webscout/Provider/QwenLM.py +311 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -0
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/artbit/__init__.py +22 -22
- webscout/Provider/TTI/artbit/async_artbit.py +184 -184
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
- webscout/Provider/TTI/blackbox/__init__.py +4 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
- webscout/Provider/TTI/deepinfra/__init__.py +4 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
- webscout/Provider/TTI/huggingface/__init__.py +22 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
- webscout/Provider/TTI/imgninza/__init__.py +4 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
- webscout/Provider/TTI/talkai/__init__.py +4 -4
- webscout/Provider/TTI/talkai/async_talkai.py +229 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
- webscout/Provider/TTS/deepgram.py +182 -182
- webscout/Provider/TTS/elevenlabs.py +136 -136
- webscout/Provider/TTS/gesserit.py +150 -150
- webscout/Provider/TTS/murfai.py +138 -138
- webscout/Provider/TTS/parler.py +133 -134
- webscout/Provider/TTS/streamElements.py +360 -360
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TTS/voicepod.py +116 -116
- webscout/Provider/TextPollinationsAI.py +28 -8
- webscout/Provider/WiseCat.py +193 -0
- webscout/Provider/__init__.py +146 -134
- webscout/Provider/cerebras.py +242 -227
- webscout/Provider/chatglm.py +204 -204
- webscout/Provider/dgaf.py +2 -3
- webscout/Provider/freeaichat.py +221 -0
- webscout/Provider/gaurish.py +2 -3
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +223 -0
- webscout/Provider/hermes.py +218 -218
- webscout/Provider/llama3mitril.py +179 -179
- webscout/Provider/llamatutor.py +3 -3
- webscout/Provider/llmchat.py +2 -3
- webscout/Provider/meta.py +794 -794
- webscout/Provider/multichat.py +331 -331
- webscout/Provider/typegpt.py +359 -359
- webscout/Provider/yep.py +3 -3
- webscout/__init__.py +1 -0
- webscout/__main__.py +5 -5
- webscout/cli.py +319 -319
- webscout/conversation.py +241 -242
- webscout/exceptions.py +328 -328
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +2 -3
- webscout/litprinter/__init__.py +0 -58
- webscout/scout/__init__.py +8 -8
- webscout/scout/core.py +884 -884
- webscout/scout/element.py +459 -459
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +38 -38
- webscout/swiftcli/__init__.py +811 -811
- webscout/update_checker.py +2 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +87 -6
- webscout/webscout_search_async.py +58 -1
- webscout/yep_search.py +297 -0
- webscout/zeroart/__init__.py +54 -54
- webscout/zeroart/base.py +60 -60
- webscout/zeroart/effects.py +99 -99
- webscout/zeroart/fonts.py +816 -816
- {webscout-7.1.dist-info → webscout-7.3.dist-info}/METADATA +62 -22
- webscout-7.3.dist-info/RECORD +223 -0
- {webscout-7.1.dist-info → webscout-7.3.dist-info}/WHEEL +1 -1
- webstoken/__init__.py +30 -30
- webstoken/classifier.py +189 -189
- webstoken/keywords.py +216 -216
- webstoken/language.py +128 -128
- webstoken/ner.py +164 -164
- webstoken/normalizer.py +35 -35
- webstoken/processor.py +77 -77
- webstoken/sentiment.py +206 -206
- webstoken/stemmer.py +73 -73
- webstoken/tagger.py +60 -60
- webstoken/tokenizer.py +158 -158
- webscout-7.1.dist-info/RECORD +0 -198
- {webscout-7.1.dist-info → webscout-7.3.dist-info}/LICENSE.md +0 -0
- {webscout-7.1.dist-info → webscout-7.3.dist-info}/entry_points.txt +0 -0
- {webscout-7.1.dist-info → webscout-7.3.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,218 @@
|
|
|
1
|
+
|
|
2
|
+
import requests
|
|
3
|
+
import json
|
|
4
|
+
from typing import Any, Dict, Optional, Generator
|
|
5
|
+
from webscout.AIutel import Optimizers
|
|
6
|
+
from webscout.AIutel import Conversation
|
|
7
|
+
from webscout.AIutel import AwesomePrompts
|
|
8
|
+
from webscout.AIbase import Provider
|
|
9
|
+
from webscout import exceptions
|
|
10
|
+
from webscout.Litlogger import Logger, LogFormat
|
|
11
|
+
from webscout import LitAgent as Lit
|
|
12
|
+
|
|
13
|
+
class DeepSeek(Provider):
    """
    A class to interact with the DeepSeek AI API.

    Streams chat completions over the OpenAI-style SSE protocol
    (``data: {...}`` lines terminated by ``data: [DONE]``) and keeps an
    optional local conversation history via webscout's Conversation helper.
    """

    # Mapping of public model aliases to API model identifiers.
    AVAILABLE_MODELS = {
        "deepseek-v3": "deepseek-v3",
        "deepseek-r1": "deepseek-r1",
        "deepseek-llm-67b-chat": "deepseek-llm-67b-chat"
    }

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "deepseek-r1",  # Default model
        system_prompt: str = "You are a helpful AI assistant.",
        logging: bool = False
    ):
        """
        Initializes the DeepSeek AI API with given parameters.

        Args:
            is_conversation: Keep multi-turn conversation history.
            max_tokens: Maximum tokens to sample per response.
            timeout: HTTP request timeout in seconds.
            intro: Optional conversation intro / system text.
            filepath: Optional path used to persist conversation history.
            update_file: Whether to append new turns to ``filepath``.
            proxies: Optional proxy mapping for ``requests`` (default: none).
            history_offset: Maximum characters of history to retain.
            act: Optional persona key resolved through AwesomePrompts.
            model: One of ``AVAILABLE_MODELS``.
            system_prompt: System message sent before the user prompt.
            logging: Enable Litlogger-based logging.

        Raises:
            ValueError: If ``model`` is not one of ``AVAILABLE_MODELS``.
        """
        if model not in self.AVAILABLE_MODELS:
            # join() renders a clean comma-separated list instead of the
            # dict_keys(...) repr the f-string would otherwise produce.
            raise ValueError(
                f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}"
            )

        # Initialize logging (None disables all logger calls below).
        self.logger = Logger(
            name="DeepSeek",
            format=LogFormat.MODERN_EMOJI,
        ) if logging else None

        if self.logger:
            self.logger.info(f"Initializing DeepSeek with model: {model}")

        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://www.deepseekapp.io/v1/chat/completions"
        self.timeout = timeout
        self.last_response = {}
        self.system_prompt = system_prompt
        self.model = model
        self.api_key = "skgadi_mare_2_seater"
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}",
        }

        # Materialized as a tuple (not a generator): a generator would be
        # exhausted by the first membership test in ask(), silently breaking
        # every subsequent optimizer lookup.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        # Fresh dict per instance — avoids the shared-mutable-default pitfall.
        self.session.proxies = proxies if proxies is not None else {}

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any]:
        """Chat with AI.

        Args:
            prompt: The user prompt.
            stream: Yield chunks as they arrive instead of returning once.
            raw: When streaming, yield the raw text chunk instead of a
                ``{"text": ...}`` dict.
            optimizer: Optional name of an ``Optimizers`` method to apply.
            conversationally: Apply the optimizer to the full conversation
                prompt rather than the bare prompt.

        Returns:
            ``{"text": ...}`` when ``stream`` is False, otherwise a generator
            of chunks.

        Raises:
            exceptions.FailedToGenerateResponseError: On HTTP/network failure.
        """
        if self.logger:
            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")

        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
                if self.logger:
                    self.logger.debug(f"Applied optimizer: {optimizer}")
            else:
                if self.logger:
                    self.logger.error(f"Invalid optimizer: {optimizer}")
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        messages = [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": conversation_prompt}
        ]

        payload = {
            "model": self.model,
            "messages": messages
        }

        def for_stream():
            if self.logger:
                # Fixed copy-paste: this is the DeepSeek endpoint, not DeepInfra.
                self.logger.debug("Sending streaming request to DeepSeek API...")
            try:
                with requests.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
                    if response.status_code != 200:
                        if self.logger:
                            self.logger.error(f"Request failed with status code {response.status_code}")
                        raise exceptions.FailedToGenerateResponseError(
                            f"Request failed with status code {response.status_code}"
                        )

                    streaming_text = ""
                    for line in response.iter_lines(decode_unicode=True):
                        if line:
                            line = line.strip()
                            if line.startswith("data: "):
                                json_str = line[6:]  # Remove "data: " prefix
                                if json_str == "[DONE]":
                                    break
                                try:
                                    json_data = json.loads(json_str)
                                    if 'choices' in json_data:
                                        choice = json_data['choices'][0]
                                        if 'delta' in choice and 'content' in choice['delta']:
                                            content = choice['delta']['content']
                                            streaming_text += content
                                            resp = {"text": content}
                                            # raw callers get the bare text chunk;
                                            # the original yielded the dict on both
                                            # branches, making `raw` a no-op.
                                            yield content if raw else resp
                                except json.JSONDecodeError:
                                    if self.logger:
                                        self.logger.error("JSON decode error in streaming data")
                                    continue

                    self.last_response = {"text": streaming_text}
                    self.conversation.update_chat_history(prompt, streaming_text)
                    if self.logger:
                        self.logger.info("Streaming response completed successfully")

            except requests.RequestException as e:
                if self.logger:
                    self.logger.error(f"Request failed: {e}")
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        def for_non_stream():
            # Drain the stream; for_stream() populates self.last_response.
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate response string (or a generator of strings when streaming)."""
        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response"""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
|
|
205
|
+
|
|
206
|
+
if __name__ == "__main__":
    from rich import print

    # Example usage: stream a short completion chunk-by-chunk to stdout.
    bot = DeepSeek(system_prompt="You are an expert AI assistant.", logging=True)

    try:
        # Send a prompt and stream the response
        for chunk in bot.chat("Write me a short poem about AI.", stream=True):
            print(chunk, end="", flush=True)
    except Exception as e:
        print(f"Error: {e}")
|
webscout/Provider/Deepinfra.py
CHANGED
|
@@ -9,7 +9,8 @@ from webscout.AIutel import AwesomePrompts, sanitize_stream
|
|
|
9
9
|
from webscout.AIbase import Provider, AsyncProvider
|
|
10
10
|
from webscout import exceptions
|
|
11
11
|
from webscout import LitAgent
|
|
12
|
-
from webscout.Litlogger import
|
|
12
|
+
from webscout.Litlogger import Logger, LogFormat, ConsoleHandler
|
|
13
|
+
from webscout.Litlogger.core.level import LogLevel
|
|
13
14
|
|
|
14
15
|
class DeepInfra(Provider):
|
|
15
16
|
"""
|
|
@@ -79,15 +80,20 @@ class DeepInfra(Provider):
|
|
|
79
80
|
)
|
|
80
81
|
self.conversation.history_offset = history_offset
|
|
81
82
|
|
|
82
|
-
# Initialize logger
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
83
|
+
# Initialize logger with proper configuration
|
|
84
|
+
if logging:
|
|
85
|
+
console_handler = ConsoleHandler(
|
|
86
|
+
level=LogLevel.DEBUG,
|
|
87
|
+
)
|
|
88
|
+
|
|
89
|
+
self.logger = Logger(
|
|
90
|
+
name="DeepInfra",
|
|
91
|
+
level=LogLevel.DEBUG,
|
|
92
|
+
handlers=[console_handler]
|
|
93
|
+
)
|
|
94
|
+
self.logger.info("DeepInfra initialized successfully ✨")
|
|
95
|
+
else:
|
|
96
|
+
self.logger = None
|
|
91
97
|
|
|
92
98
|
def ask(
|
|
93
99
|
self,
|
|
@@ -97,6 +103,9 @@ class DeepInfra(Provider):
|
|
|
97
103
|
optimizer: str = None,
|
|
98
104
|
conversationally: bool = False,
|
|
99
105
|
) -> Union[Dict[str, Any], Generator]:
|
|
106
|
+
if self.logger:
|
|
107
|
+
self.logger.debug(f"Processing request - Stream: {stream}, Optimizer: {optimizer}")
|
|
108
|
+
|
|
100
109
|
conversation_prompt = self.conversation.gen_complete_prompt(prompt)
|
|
101
110
|
if optimizer:
|
|
102
111
|
if optimizer in self.__available_optimizers:
|
|
@@ -104,7 +113,7 @@ class DeepInfra(Provider):
|
|
|
104
113
|
conversation_prompt if conversationally else prompt
|
|
105
114
|
)
|
|
106
115
|
if self.logger:
|
|
107
|
-
self.logger.
|
|
116
|
+
self.logger.info(f"Applied optimizer: {optimizer} 🔧")
|
|
108
117
|
else:
|
|
109
118
|
if self.logger:
|
|
110
119
|
self.logger.error(f"Invalid optimizer requested: {optimizer}")
|
|
@@ -120,25 +129,30 @@ class DeepInfra(Provider):
|
|
|
120
129
|
"stream": stream
|
|
121
130
|
}
|
|
122
131
|
|
|
132
|
+
if self.logger:
|
|
133
|
+
self.logger.debug(f"Sending request to model: {self.model} 🚀")
|
|
134
|
+
|
|
123
135
|
def for_stream():
|
|
124
136
|
if self.logger:
|
|
125
|
-
self.logger.
|
|
137
|
+
self.logger.info("Starting stream processing ⚡")
|
|
126
138
|
try:
|
|
127
139
|
with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
|
|
128
140
|
if response.status_code != 200:
|
|
129
141
|
if self.logger:
|
|
130
|
-
self.logger.error(f"Request failed with status
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
142
|
+
self.logger.error(f"Request failed with status {response.status_code} ❌")
|
|
143
|
+
raise exceptions.FailedToGenerateResponseError(
|
|
144
|
+
f"Request failed with status code {response.status_code}"
|
|
145
|
+
)
|
|
146
|
+
|
|
135
147
|
streaming_text = ""
|
|
136
148
|
for line in response.iter_lines(decode_unicode=True):
|
|
137
149
|
if line:
|
|
138
150
|
line = line.strip()
|
|
139
151
|
if line.startswith("data: "):
|
|
140
|
-
json_str = line[6:]
|
|
152
|
+
json_str = line[6:]
|
|
141
153
|
if json_str == "[DONE]":
|
|
154
|
+
if self.logger:
|
|
155
|
+
self.logger.info("Stream completed successfully ✅")
|
|
142
156
|
break
|
|
143
157
|
try:
|
|
144
158
|
json_data = json.loads(json_str)
|
|
@@ -151,17 +165,19 @@ class DeepInfra(Provider):
|
|
|
151
165
|
yield resp if raw else resp
|
|
152
166
|
except json.JSONDecodeError:
|
|
153
167
|
if self.logger:
|
|
154
|
-
self.logger.error("
|
|
155
|
-
|
|
168
|
+
self.logger.error("Failed to decode JSON response 🔥")
|
|
169
|
+
continue
|
|
170
|
+
|
|
156
171
|
self.conversation.update_chat_history(prompt, streaming_text)
|
|
157
|
-
|
|
158
|
-
self.logger.info("Streaming response completed successfully")
|
|
172
|
+
|
|
159
173
|
except requests.RequestException as e:
|
|
160
174
|
if self.logger:
|
|
161
|
-
self.logger.error(f"Request failed: {e}")
|
|
162
|
-
raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
|
|
175
|
+
self.logger.error(f"Request failed: {str(e)} 🔥")
|
|
176
|
+
raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
|
|
163
177
|
|
|
164
178
|
def for_non_stream():
|
|
179
|
+
if self.logger:
|
|
180
|
+
self.logger.debug("Processing non-stream request")
|
|
165
181
|
for _ in for_stream():
|
|
166
182
|
pass
|
|
167
183
|
return self.last_response
|