webscout-7.0-py3-none-any.whl → webscout-7.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +191 -191
- webscout/AIbase.py +122 -122
- webscout/AIutel.py +440 -440
- webscout/Bard.py +343 -161
- webscout/DWEBS.py +489 -492
- webscout/Extra/YTToolkit/YTdownloader.py +995 -995
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +476 -479
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +103 -103
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder_utiles.py +199 -199
- webscout/Extra/autocoder/rawdog.py +5 -7
- webscout/Extra/autollama.py +230 -230
- webscout/Extra/gguf.py +3 -3
- webscout/Extra/weather.py +171 -171
- webscout/LLM.py +442 -442
- webscout/Litlogger/__init__.py +67 -681
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +20 -0
- webscout/Litlogger/core/logger.py +123 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +50 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +174 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +231 -0
- webscout/Litlogger/styles/formats.py +377 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +154 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AISEARCH/DeepFind.py +250 -250
- webscout/Provider/Blackboxai.py +136 -137
- webscout/Provider/ChatGPTGratis.py +226 -0
- webscout/Provider/Cloudflare.py +91 -78
- webscout/Provider/DeepSeek.py +218 -0
- webscout/Provider/Deepinfra.py +59 -35
- webscout/Provider/Free2GPT.py +131 -124
- webscout/Provider/Gemini.py +100 -115
- webscout/Provider/Glider.py +74 -59
- webscout/Provider/Groq.py +30 -18
- webscout/Provider/Jadve.py +108 -77
- webscout/Provider/Llama3.py +117 -94
- webscout/Provider/Marcus.py +191 -137
- webscout/Provider/Netwrck.py +62 -50
- webscout/Provider/PI.py +79 -124
- webscout/Provider/PizzaGPT.py +129 -83
- webscout/Provider/QwenLM.py +311 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +257 -257
- webscout/Provider/TTI/AiForce/sync_aiforce.py +242 -242
- webscout/Provider/TTI/Nexra/__init__.py +22 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +330 -330
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +285 -285
- webscout/Provider/TTI/artbit/__init__.py +22 -22
- webscout/Provider/TTI/artbit/async_artbit.py +184 -184
- webscout/Provider/TTI/artbit/sync_artbit.py +176 -176
- webscout/Provider/TTI/blackbox/__init__.py +4 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +212 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +199 -199
- webscout/Provider/TTI/deepinfra/__init__.py +4 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +227 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +199 -199
- webscout/Provider/TTI/huggingface/__init__.py +22 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -195
- webscout/Provider/TTI/imgninza/__init__.py +4 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +214 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +209 -209
- webscout/Provider/TTI/talkai/__init__.py +4 -4
- webscout/Provider/TTI/talkai/async_talkai.py +229 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -207
- webscout/Provider/TTS/deepgram.py +182 -182
- webscout/Provider/TTS/elevenlabs.py +136 -136
- webscout/Provider/TTS/gesserit.py +150 -150
- webscout/Provider/TTS/murfai.py +138 -138
- webscout/Provider/TTS/parler.py +133 -134
- webscout/Provider/TTS/streamElements.py +360 -360
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TTS/voicepod.py +116 -116
- webscout/Provider/TextPollinationsAI.py +74 -47
- webscout/Provider/WiseCat.py +193 -0
- webscout/Provider/__init__.py +144 -136
- webscout/Provider/cerebras.py +242 -227
- webscout/Provider/chatglm.py +204 -204
- webscout/Provider/dgaf.py +67 -39
- webscout/Provider/gaurish.py +105 -66
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +223 -0
- webscout/Provider/hermes.py +218 -218
- webscout/Provider/llama3mitril.py +179 -179
- webscout/Provider/llamatutor.py +72 -62
- webscout/Provider/llmchat.py +60 -35
- webscout/Provider/meta.py +794 -794
- webscout/Provider/multichat.py +331 -230
- webscout/Provider/typegpt.py +359 -356
- webscout/Provider/yep.py +5 -5
- webscout/__main__.py +5 -5
- webscout/cli.py +319 -319
- webscout/conversation.py +241 -242
- webscout/exceptions.py +328 -328
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +2 -3
- webscout/litprinter/__init__.py +0 -58
- webscout/scout/__init__.py +8 -8
- webscout/scout/core.py +884 -884
- webscout/scout/element.py +459 -459
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +38 -38
- webscout/swiftcli/__init__.py +811 -811
- webscout/update_checker.py +2 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +1142 -1140
- webscout/webscout_search_async.py +635 -635
- webscout/zeroart/__init__.py +54 -54
- webscout/zeroart/base.py +60 -60
- webscout/zeroart/effects.py +99 -99
- webscout/zeroart/fonts.py +816 -816
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/METADATA +21 -28
- webscout-7.2.dist-info/RECORD +217 -0
- webstoken/__init__.py +30 -30
- webstoken/classifier.py +189 -189
- webstoken/keywords.py +216 -216
- webstoken/language.py +128 -128
- webstoken/ner.py +164 -164
- webstoken/normalizer.py +35 -35
- webstoken/processor.py +77 -77
- webstoken/sentiment.py +206 -206
- webstoken/stemmer.py +73 -73
- webstoken/tagger.py +60 -60
- webstoken/tokenizer.py +158 -158
- webscout/Provider/RUBIKSAI.py +0 -272
- webscout-7.0.dist-info/RECORD +0 -199
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/LICENSE.md +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/WHEEL +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/entry_points.txt +0 -0
- {webscout-7.0.dist-info → webscout-7.2.dist-info}/top_level.txt +0 -0
webscout/Provider/gaurish.py
CHANGED
@@ -1,7 +1,6 @@
 import requests
 import json
-import
-from typing import Any, Dict, Optional, Generator, List, Union
+from typing import Any, Dict, Generator, Union
 import uuid
 
 from webscout.AIutel import Optimizers
@@ -9,11 +8,12 @@ from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts, sanitize_stream
 from webscout.AIbase import Provider, AsyncProvider
 from webscout import exceptions
-
 from webscout import LitAgent
+from webscout.Litlogger import Logger, LogFormat
+
 class GaurishCerebras(Provider):
     """
-    A class to interact with the Gaurish Cerebras API.
+    A class to interact with the Gaurish Cerebras API with comprehensive logging.
     """
 
     def __init__(
@@ -27,52 +27,39 @@ class GaurishCerebras(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a helpful assistant.",
+        system_prompt: str = "You are a helpful assistant.",
+        logging: bool = False
     ):
-        """Initializes the Gaurish Cerebras API client."""
-        self.
+        """Initializes the Gaurish Cerebras API client with logging capabilities."""
+        self.logger = Logger(
+            name="GaurishCerebras",
+            format=LogFormat.MODERN_EMOJI,
+        ) if logging else None
+
+        if self.logger:
+            self.logger.info("Initializing GaurishCerebras client")
+
+        self.chat_endpoint = "https://proxy.gaurish.xyz/api/cerebras/v1/chat/completions"
+
         self.headers = {
             "Content-Type": "application/json",
-            "Accept": "
-            "
-            "access-control-allow-headers": "*",
-            "access-control-allow-methods": "*",
-            "access-control-allow-origin": "*",
-            "cache-control": "public, max-age=0, must-revalidate",
-            "referrer-policy": "strict-origin-when-cross-origin",
-            "content-type": "text/event-stream; charset=utf-8",
-            "strict-transport-security": "max-age=3600; includeSubDomains",
-            "x-content-type-options": "nosniff",
-            "x-matched-path": "/api/cerebras/[...path]",
-            "x-ratelimit-limit-requests-day": "30000",
-            "x-ratelimit-limit-tokens-minute": "60000",
-            "x-ratelimit-remaining-requests-day": "29984",
-            "x-ratelimit-remaining-tokens-minute": "60000",
-            "x-ratelimit-reset-requests-day": "24092.23299384117",
-            "x-ratelimit-reset-tokens-minute": "32.232993841171265",
-            "x-request-id": "0vWYzSEvd9Ytk5Zvl8NGRfT_Ekjm0ErInwwxlihBPyqUBAjJpyXwCg==",
-            "x-vercel-id": "bom1::nsbfd-1729703907288-16e74bb1db50",
-            "accept": "application/json",
+            "Accept": "application/json",
+            "authorization": "Bearer 123",
             "accept-encoding": "gzip, deflate, br, zstd",
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
             "dnt": "1",
             "origin": "https://chat.gaurish.xyz",
             "priority": "u=1, i",
             "referer": "https://chat.gaurish.xyz/",
-            "sec-ch-ua": "
+            "sec-ch-ua": '"Not A(Brand";v="8", "Chromium";v="132", "Microsoft Edge";v="132"',
             "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": "
+            "sec-ch-ua-platform": '"Windows"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-site",
-            "user-agent":
-
-
-            "x-stainless-os": "Unknown",
-            "x-stainless-package-version": "4.67.3",
-            "x-stainless-retry-count": "0",
-            "x-stainless-runtime": "browser:chrome",
-            "x-stainless-runtime-version": "130.0.0",
+            "user-agent": ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
+                           "AppleWebKit/537.36 (KHTML, like Gecko) "
+                           "Chrome/132.0.0.0 Safari/537.36 Edg/132.0.0.0"),
         }
         self.session = requests.Session()
         self.session.headers.update(self.headers)
@@ -82,6 +69,11 @@ class GaurishCerebras(Provider):
 
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
+
+        if self.logger:
+            self.logger.debug(f"Session configured with timeout: {timeout}")
+            self.logger.debug(f"Max tokens set to: {max_tokens}")
+
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
@@ -98,8 +90,10 @@ class GaurishCerebras(Provider):
             is_conversation, self.max_tokens_to_sample, filepath, update_file
         )
         self.conversation.history_offset = history_offset
-        self.system_prompt = system_prompt
+        self.system_prompt = system_prompt
 
+        if self.logger:
+            self.logger.info("GaurishCerebras initialization completed successfully")
 
     def ask(
         self,
@@ -109,6 +103,13 @@ class GaurishCerebras(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[Dict, Generator]:
+        """
+        Sends a prompt to the API and returns the response with logging.
+        If stream is True, returns a generator for streamed responses.
+        """
+        if self.logger:
+            self.logger.debug(f"Processing request - Prompt: {prompt[:50]}...")
+            self.logger.debug(f"Stream: {stream}, Optimizer: {optimizer}")
 
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -116,57 +117,81 @@ class GaurishCerebras(Provider):
                 conversation_prompt = getattr(Optimizers, optimizer)(
                     conversation_prompt if conversationally else prompt
                 )
+                if self.logger:
+                    self.logger.debug(f"Applied optimizer: {optimizer}")
             else:
-
+                if self.logger:
+                    self.logger.error(f"Invalid optimizer requested: {optimizer}")
+                raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")
 
         payload = {
             "messages": [
                 {"role": "system", "content": self.system_prompt},
                 {"role": "user", "content": conversation_prompt},
             ],
-            "model": "llama3.
+            "model": "llama3.3-70b",
+            "max_tokens": self.max_tokens_to_sample,
             "temperature": 0.75,
             "stream": stream,
         }
 
         def for_stream():
             try:
-
-
+                if self.logger:
+                    self.logger.debug("Initiating streaming request to API")
+
+                with self.session.post(self.chat_endpoint, json=payload, stream=True, timeout=self.timeout) as response:
+                    if response.status_code != 200:
+                        if self.logger:
+                            self.logger.error(f"API request failed. Status: {response.status_code}")
+                        raise exceptions.FailedToGenerateResponseError(
+                            f"Request failed with status code {response.status_code}"
+                        )
+
+                    if self.logger:
+                        self.logger.info(f"API connection established successfully. Status: {response.status_code}")
+
                     streaming_text = ""
                     for line in response.iter_lines(decode_unicode=True):
                         if line:
                             line = line.strip()
                             if line.startswith("data: "):
-
-                                if
+                                json_str = line[6:]
+                                if json_str == "[DONE]":
+                                    if self.logger:
+                                        self.logger.debug("Stream completed")
                                     break
                                 try:
-
-                                    if
-
-
-
-
-
+                                    json_data = json.loads(json_str)
+                                    if 'choices' in json_data:
+                                        choice = json_data['choices'][0]
+                                        if 'delta' in choice and 'content' in choice['delta']:
+                                            content = choice['delta']['content']
+                                            streaming_text += content
+                                            yield dict(text=content) if raw else dict(text=content)
+                                except json.JSONDecodeError as e:
+                                    if self.logger:
+                                        self.logger.error(f"JSON parsing error: {str(e)}")
                                     pass
+
                     self.conversation.update_chat_history(prompt, streaming_text)
-                    self.
+                    if self.logger:
+                        self.logger.debug("Response processing completed")
 
-            except requests.
+            except requests.RequestException as e:
+                if self.logger:
+                    self.logger.error(f"Request failed: {str(e)}")
                 raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
 
-
         def for_non_stream():
+            if self.logger:
+                self.logger.debug("Processing non-streaming request")
             for _ in for_stream():
                 pass
             return self.last_response
 
         return for_stream() if stream else for_non_stream()
 
-
-
     def chat(
         self,
         prompt: str,
@@ -174,33 +199,47 @@ class GaurishCerebras(Provider):
         optimizer: str = None,
         conversationally: bool = False,
     ) -> Union[str, Generator]:
+        """
+        A convenience method to return just the text message from the response with logging.
+        """
+        if self.logger:
+            self.logger.debug(f"Chat request initiated - Prompt: {prompt[:50]}...")
 
         def for_stream():
             for response in self.ask(
                 prompt, stream=True, optimizer=optimizer, conversationally=conversationally
             ):
-                yield self.get_message(response)
+                yield response if isinstance(response, str) else self.get_message(response)
 
         def for_non_stream():
-
-
-                prompt, stream=False, optimizer=optimizer, conversationally=conversationally
-            )
+            resp = self.ask(
+                prompt, stream=False, optimizer=optimizer, conversationally=conversationally
             )
+            return resp if isinstance(resp, str) else self.get_message(resp)
 
         return for_stream() if stream else for_non_stream()
 
     def get_message(self, response: dict) -> str:
-
-
+        """
+        Retrieve the message text from the API response with logging.
+        """
+        if not isinstance(response, dict):
+            if self.logger:
+                self.logger.warning("Invalid response format received")
+            return ""
 
+        if "text" in response and response["text"]:
+            return response["text"]
 
+        if self.logger:
+            self.logger.warning("No valid message content found in response")
+        return ""
 
 if __name__ == "__main__":
     from rich import print
-    bot = GaurishCerebras()
+    bot = GaurishCerebras(logging=True)
     try:
-        response = bot.chat("
+        response = bot.chat("what is meaning of life", stream=True)
         for chunk in response:
             print(chunk, end="", flush=True)
     except Exception as e: