webscout 8.2.2-py3-none-any.whl → 8.2.7-py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +112 -22
- webscout/AIbase.py +144 -7
- webscout/AIutel.py +249 -131
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/__init__.py +0 -1
- webscout/cli.py +256 -0
- webscout/conversation.py +307 -436
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/METADATA +172 -52
- webscout-8.2.7.dist-info/RECORD +26 -0
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/entry_points.txt +3 -0
- webscout-8.2.7.dist-info/top_level.txt +1 -0
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Extra/GitToolkit/__init__.py +0 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
- webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
- webscout/Extra/GitToolkit/gitapi/user.py +0 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
- webscout/Extra/YTToolkit/YTdownloader.py +0 -957
- webscout/Extra/YTToolkit/__init__.py +0 -3
- webscout/Extra/YTToolkit/transcriber.py +0 -476
- webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
- webscout/Extra/YTToolkit/ytapi/https.py +0 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
- webscout/Extra/YTToolkit/ytapi/query.py +0 -40
- webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
- webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
- webscout/Extra/YTToolkit/ytapi/video.py +0 -232
- webscout/Extra/__init__.py +0 -7
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -849
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.py +0 -682
- webscout/Extra/tempmail/__init__.py +0 -28
- webscout/Extra/tempmail/async_utils.py +0 -141
- webscout/Extra/tempmail/base.py +0 -161
- webscout/Extra/tempmail/cli.py +0 -187
- webscout/Extra/tempmail/emailnator.py +0 -84
- webscout/Extra/tempmail/mail_tm.py +0 -361
- webscout/Extra/tempmail/temp_mail_io.py +0 -292
- webscout/Extra/weather.py +0 -194
- webscout/Extra/weather_ascii.py +0 -76
- webscout/LLM.py +0 -442
- webscout/Litlogger/__init__.py +0 -67
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -250
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/AISEARCH/Perplexity.py +0 -359
- webscout/Provider/AISEARCH/__init__.py +0 -10
- webscout/Provider/AISEARCH/felo_search.py +0 -228
- webscout/Provider/AISEARCH/genspark_search.py +0 -208
- webscout/Provider/AISEARCH/hika_search.py +0 -194
- webscout/Provider/AISEARCH/iask_search.py +0 -436
- webscout/Provider/AISEARCH/monica_search.py +0 -246
- webscout/Provider/AISEARCH/scira_search.py +0 -324
- webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
- webscout/Provider/Aitopia.py +0 -292
- webscout/Provider/AllenAI.py +0 -413
- webscout/Provider/Andi.py +0 -228
- webscout/Provider/Blackboxai.py +0 -229
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTClone.py +0 -226
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/Cloudflare.py +0 -273
- webscout/Provider/Cohere.py +0 -208
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Deepinfra.py +0 -297
- webscout/Provider/ElectronHub.py +0 -709
- webscout/Provider/ExaAI.py +0 -261
- webscout/Provider/ExaChat.py +0 -342
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -193
- webscout/Provider/Gemini.py +0 -169
- webscout/Provider/GithubChat.py +0 -367
- webscout/Provider/Glider.py +0 -211
- webscout/Provider/Groq.py +0 -670
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HeckAI.py +0 -233
- webscout/Provider/HuggingFaceChat.py +0 -462
- webscout/Provider/Hunyuan.py +0 -272
- webscout/Provider/Jadve.py +0 -266
- webscout/Provider/Koboldai.py +0 -381
- webscout/Provider/LambdaChat.py +0 -392
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Llama3.py +0 -204
- webscout/Provider/Marcus.py +0 -148
- webscout/Provider/Netwrck.py +0 -228
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/__init__.py +0 -25
- webscout/Provider/OPENAI/base.py +0 -46
- webscout/Provider/OPENAI/c4ai.py +0 -367
- webscout/Provider/OPENAI/chatgpt.py +0 -549
- webscout/Provider/OPENAI/chatgptclone.py +0 -460
- webscout/Provider/OPENAI/deepinfra.py +0 -272
- webscout/Provider/OPENAI/e2b.py +0 -1350
- webscout/Provider/OPENAI/exaai.py +0 -404
- webscout/Provider/OPENAI/exachat.py +0 -433
- webscout/Provider/OPENAI/freeaichat.py +0 -352
- webscout/Provider/OPENAI/glider.py +0 -316
- webscout/Provider/OPENAI/heckai.py +0 -337
- webscout/Provider/OPENAI/llmchatco.py +0 -327
- webscout/Provider/OPENAI/netwrck.py +0 -348
- webscout/Provider/OPENAI/opkfc.py +0 -488
- webscout/Provider/OPENAI/scirachat.py +0 -463
- webscout/Provider/OPENAI/sonus.py +0 -294
- webscout/Provider/OPENAI/standardinput.py +0 -425
- webscout/Provider/OPENAI/textpollinations.py +0 -285
- webscout/Provider/OPENAI/toolbaz.py +0 -405
- webscout/Provider/OPENAI/typegpt.py +0 -346
- webscout/Provider/OPENAI/uncovrAI.py +0 -455
- webscout/Provider/OPENAI/utils.py +0 -211
- webscout/Provider/OPENAI/venice.py +0 -413
- webscout/Provider/OPENAI/wisecat.py +0 -381
- webscout/Provider/OPENAI/writecream.py +0 -156
- webscout/Provider/OPENAI/x0gpt.py +0 -371
- webscout/Provider/OPENAI/yep.py +0 -327
- webscout/Provider/OpenGPT.py +0 -199
- webscout/Provider/Openai.py +0 -496
- webscout/Provider/PI.py +0 -344
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/PizzaGPT.py +0 -198
- webscout/Provider/QwenLM.py +0 -254
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -278
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/__init__.py +0 -12
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/TTS/__init__.py +0 -7
- webscout/Provider/TTS/deepgram.py +0 -156
- webscout/Provider/TTS/elevenlabs.py +0 -111
- webscout/Provider/TTS/gesserit.py +0 -127
- webscout/Provider/TTS/murfai.py +0 -113
- webscout/Provider/TTS/parler.py +0 -111
- webscout/Provider/TTS/speechma.py +0 -180
- webscout/Provider/TTS/streamElements.py +0 -333
- webscout/Provider/TTS/utils.py +0 -280
- webscout/Provider/TeachAnything.py +0 -187
- webscout/Provider/TextPollinationsAI.py +0 -231
- webscout/Provider/TwoAI.py +0 -199
- webscout/Provider/Venice.py +0 -219
- webscout/Provider/VercelAI.py +0 -234
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/WiseCat.py +0 -196
- webscout/Provider/Writecream.py +0 -211
- webscout/Provider/WritingMate.py +0 -197
- webscout/Provider/Youchat.py +0 -330
- webscout/Provider/__init__.py +0 -198
- webscout/Provider/ai4chat.py +0 -202
- webscout/Provider/aimathgpt.py +0 -189
- webscout/Provider/akashgpt.py +0 -342
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/asksteve.py +0 -203
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/cerebras.py +0 -242
- webscout/Provider/chatglm.py +0 -205
- webscout/Provider/cleeai.py +0 -213
- webscout/Provider/copilot.py +0 -428
- webscout/Provider/elmo.py +0 -234
- webscout/Provider/freeaichat.py +0 -271
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/geminiapi.py +0 -208
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/Provider/granite.py +0 -187
- webscout/Provider/hermes.py +0 -219
- webscout/Provider/julius.py +0 -223
- webscout/Provider/koala.py +0 -268
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/learnfastai.py +0 -266
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llama3mitril.py +0 -180
- webscout/Provider/llamatutor.py +0 -192
- webscout/Provider/llmchat.py +0 -213
- webscout/Provider/llmchatco.py +0 -311
- webscout/Provider/meta.py +0 -794
- webscout/Provider/multichat.py +0 -325
- webscout/Provider/promptrefine.py +0 -193
- webscout/Provider/scira_chat.py +0 -277
- webscout/Provider/scnet.py +0 -187
- webscout/Provider/searchchat.py +0 -293
- webscout/Provider/sonus.py +0 -208
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/toolbaz.py +0 -320
- webscout/Provider/turboseek.py +0 -219
- webscout/Provider/tutorai.py +0 -252
- webscout/Provider/typefully.py +0 -280
- webscout/Provider/typegpt.py +0 -232
- webscout/Provider/uncovr.py +0 -312
- webscout/Provider/x0gpt.py +0 -256
- webscout/Provider/yep.py +0 -376
- webscout/litagent/__init__.py +0 -29
- webscout/litagent/agent.py +0 -455
- webscout/litagent/constants.py +0 -60
- webscout/litprinter/__init__.py +0 -59
- webscout/scout/__init__.py +0 -8
- webscout/scout/core/__init__.py +0 -7
- webscout/scout/core/crawler.py +0 -140
- webscout/scout/core/scout.py +0 -568
- webscout/scout/core/search_result.py +0 -96
- webscout/scout/core/text_analyzer.py +0 -63
- webscout/scout/core/text_utils.py +0 -277
- webscout/scout/core/web_analyzer.py +0 -52
- webscout/scout/core.py +0 -881
- webscout/scout/element.py +0 -460
- webscout/scout/parsers/__init__.py +0 -69
- webscout/scout/parsers/html5lib_parser.py +0 -172
- webscout/scout/parsers/html_parser.py +0 -236
- webscout/scout/parsers/lxml_parser.py +0 -178
- webscout/scout/utils.py +0 -37
- webscout/swiftcli/__init__.py +0 -809
- webscout/zeroart/__init__.py +0 -55
- webscout/zeroart/base.py +0 -60
- webscout/zeroart/effects.py +0 -99
- webscout/zeroart/fonts.py +0 -816
- webscout-8.2.2.dist-info/RECORD +0 -309
- webscout-8.2.2.dist-info/entry_points.txt +0 -5
- webscout-8.2.2.dist-info/top_level.txt +0 -3
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/Venice.py
DELETED
@@ -1,219 +0,0 @@
-import requests
-import json
-from typing import Generator, Dict, Any, List, Union
-from uuid import uuid4
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-
-class Venice(Provider):
-    """
-    A class to interact with the Venice AI API.
-    """
-
-    AVAILABLE_MODELS = [
-        "mistral-31-24b",
-        "llama-3.2-3b-akash",
-        "qwen2dot5-coder-32b",
-        "deepseek-coder-v2-lite",
-
-    ]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 2000,
-        timeout: int = 30,
-        temperature: float = 0.8,
-        top_p: float = 0.9,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "llama-3.3-70b",
-        system_prompt: str = "You are a helpful AI assistant."
-    ):
-        """Initialize Venice AI client"""
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.api_endpoint = "https://venice.ai/api/inference/chat"
-        self.session = requests.Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.temperature = temperature
-        self.top_p = top_p
-        self.timeout = timeout
-        self.model = model
-        self.system_prompt = system_prompt
-        self.last_response = {}
-
-        # Headers for the request
-        self.headers = {
-            "User-Agent": LitAgent().random(),
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/json",
-            "origin": "https://venice.ai",
-            "referer": "https://venice.ai/chat/",
-            "sec-ch-ua": '"Google Chrome";v="133", "Chromium";v="133", "Not?A_Brand";v="24"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin"
-        }
-
-        self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[Dict[str, Any], Generator]:
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        # Payload construction
-        payload = {
-            "requestId": str(uuid4())[:7],
-            "modelId": self.model,
-            "prompt": [{"content": conversation_prompt, "role": "user"}],
-            "systemPrompt": self.system_prompt,
-            "conversationType": "text",
-            "temperature": self.temperature,
-            "webEnabled": True,
-            "topP": self.top_p,
-            "includeVeniceSystemPrompt": False,
-            "isCharacter": False,
-            "clientProcessingTime": 2000
-        }
-
-        def for_stream():
-            try:
-                with self.session.post(
-                    self.api_endpoint,
-                    json=payload,
-                    stream=True,
-                    timeout=self.timeout
-                ) as response:
-                    if response.status_code != 200:
-                        raise exceptions.FailedToGenerateResponseError(
-                            f"Request failed with status code {response.status_code}"
-                        )
-
-                    streaming_text = ""
-                    for line in response.iter_lines():
-                        if not line:
-                            continue
-
-                        try:
-                            # Decode bytes to string
-                            line_data = line.decode('utf-8').strip()
-                            if '"kind":"content"' in line_data:
-                                data = json.loads(line_data)
-                                if 'content' in data:
-                                    content = data['content']
-                                    streaming_text += content
-                                    resp = dict(text=content)
-                                    yield resp if raw else resp
-                        except json.JSONDecodeError:
-                            continue
-                        except UnicodeDecodeError:
-                            continue
-
-                    self.conversation.update_chat_history(prompt, streaming_text)
-
-            except requests.RequestException as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
-
-        def for_non_stream():
-            full_text = ""
-            for chunk in for_stream():
-                full_text += chunk["text"]
-            return {"text": full_text}
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[str, Generator]:
-        def for_stream():
-            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
-                yield self.get_message(response)
-        def for_non_stream():
-            return self.get_message(
-                self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
-            )
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-
-if __name__ == "__main__":
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    # Test all available models
-    working = 0
-    total = len(Venice.AVAILABLE_MODELS)
-
-    for model in Venice.AVAILABLE_MODELS:
-        try:
-            test_ai = Venice(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word", stream=True)
-            response_text = ""
-            for chunk in response:
-                response_text += chunk
-                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
-
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Truncate response if too long
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"\r{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/VercelAI.py
DELETED
@@ -1,234 +0,0 @@
-import re
-import time
-import requests
-import json
-from typing import Union, Any, Dict, Generator, Optional
-import uuid
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-
-
-class VercelAI(Provider):
-    """
-    A class to interact with the Vercel AI API.
-    """
-
-    AVAILABLE_MODELS = [
-        "chat-model",
-        "chat-model-reasoning"
-    ]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "chat-model",
-        system_prompt: str = "You are a helpful AI assistant."
-    ):
-        """Initializes the Vercel AI API client."""
-
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.session = requests.Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://chat.vercel.ai/api/chat"
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-        self.system_prompt = system_prompt
-        self.litagent = LitAgent()
-        self.headers = self.litagent.generate_fingerprint()
-        self.session.headers.update(self.headers)
-        self.session.proxies = proxies
-
-        # Add Vercel AI specific headers
-        self.session.headers.update({
-            "authority": "chat.vercel.ai",
-            "accept": "*/*",
-            "accept-encoding": "gzip, deflate, br, zstd",
-            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "content-type": "application/json",
-            "dnt": "1",
-            "origin": "https://chat.vercel.ai",
-            "priority": "u=1, i",
-            "referer": "https://chat.vercel.ai/",
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "sec-gpc": "1",
-            "x-kpsdk-c": "1-Cl4OUDwFNA",
-            "x-kpsdk-cd": json.dumps({
-                "workTime": int(time.time() * 1000),
-                "id": str(uuid.uuid4()),
-                "answers": [5, 5],
-                "duration": 26.9,
-                "d": 1981,
-                "st": int(time.time() * 1000) - 1000,
-                "rst": int(time.time() * 1000) - 500
-            }),
-            "x-kpsdk-ct": str(uuid.uuid4()),
-            "x-kpsdk-r": "1-B1NfB2A",
-            "x-kpsdk-v": "j-1.0.0"
-        })
-
-        # Add cookies
-        self.session.cookies.update({
-            "KP_UIDz": str(uuid.uuid4()),
-            "KP_UIDz-ssn": str(uuid.uuid4())
-        })
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
-        """Chat with AI"""
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        payload = {
-            "id": "guest",
-            "messages": [
-                {
-                    "id": str(uuid.uuid4()),
-                    "createdAt": "2025-03-29T09:13:16.992Z",
-                    "role": "user",
-                    "content": conversation_prompt,
-                    "parts": [{"type": "text", "text": conversation_prompt}]
-                }
-            ],
-            "selectedChatModelId": self.model
-        }
-
-        def for_stream():
-            response = self.session.post(
-                self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
-            )
-            if not response.ok:
-                error_msg = f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                raise exceptions.FailedToGenerateResponseError(error_msg)
-
-            streaming_response = ""
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    match = re.search(r'0:"(.*?)"', line)
-                    if match:
-                        content = match.group(1)
-                        streaming_response += content
-                        yield content if raw else dict(text=content)
-            self.last_response.update(dict(text=streaming_response))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-
-        def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`"""
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response"""
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
-
-if __name__ == "__main__":
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    # Test all available models
-    working = 0
-    total = len(VercelAI.AVAILABLE_MODELS)
-
-    for model in VercelAI.AVAILABLE_MODELS:
-        try:
-            test_ai = VercelAI(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word", stream=True)
-            response_text = ""
-            for chunk in response:
-                response_text += chunk
-                print(f"\r{model:<50} {'Testing...':<10}", end="", flush=True)
-
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Truncate response if too long
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"\r{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/WebSim.py
DELETED
@@ -1,228 +0,0 @@
-import requests
-import json
-import string
-import random
-from typing import Any, Dict, Union
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-
-class WebSim(Provider):
-    """
-    A class to interact with the WebSim API.
-    """
-
-    url = "https://websim.ai"
-    chat_api_endpoint = "https://websim.ai/api/v1/inference/run_chat_completion"
-    image_api_endpoint = "https://websim.ai/api/v1/inference/run_image_generation"
-
-    image_models = ['flux']
-    AVAILABLE_MODELS = ['gemini-1.5-flash', 'gemini-1.5-pro', 'gemini-flash', 'gemini-pro', 'gemini-flash-thinking'] + image_models
-
-    @staticmethod
-    def generate_project_id(for_image=False):
-        """
-        Generate a project ID in the appropriate format
-
-        For chat: format like 'ke3_xh5gai3gjkmruomu'
-        For image: format like 'kx0m131_rzz66qb2xoy7'
-        """
-        chars = string.ascii_lowercase + string.digits
-
-        if for_image:
-            first_part = ''.join(random.choices(chars, k=7))
-            second_part = ''.join(random.choices(chars, k=12))
-            return f"{first_part}_{second_part}"
-        else:
-            prefix = ''.join(random.choices(chars, k=3))
-            suffix = ''.join(random.choices(chars, k=15))
-            return f"{prefix}_{suffix}"
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 2049,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = 'gemini-1.5-pro',
-        aspect_ratio: str = "1:1"
-    ):
-        """Initializes the WebSim API client."""
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-        self.agent = LitAgent()
-        self.headers = {
-            'accept': '*/*',
-            'accept-language': 'en-US,en;q=0.9',
-            'content-type': 'text/plain;charset=UTF-8',
-            'origin': 'https://websim.ai',
-            'user-agent': self.agent.random(),
-            'websim-flags;': ''
-        }
-
-        self.session = requests.Session()
-        self.session.headers.update(self.headers)
-        self.session.proxies.update(proxies)
-
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-        self.aspect_ratio = aspect_ratio
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Dict[str, Any]:
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        is_image_request = self.model in self.image_models
-        project_id = self.generate_project_id(for_image=is_image_request)
-
-        if is_image_request:
-            self.headers['referer'] = 'https://websim.ai/@ISWEARIAMNOTADDICTEDTOPILLOW/ai-image-prompt-generator'
-            return self._handle_image_request(project_id, conversation_prompt)
-        else:
-            self.headers['referer'] = 'https://websim.ai/@ISWEARIAMNOTADDICTEDTOPILLOW/zelos-ai-assistant'
-            return self._handle_chat_request(project_id, conversation_prompt)
-
-    def _handle_image_request(self, project_id: str, prompt: str) -> Dict[str, Any]:
-        try:
-            data = {
-                "project_id": project_id,
-                "prompt": prompt,
-                "aspect_ratio": self.aspect_ratio
-            }
-            response = self.session.post(
-                self.image_api_endpoint,
-                json=data,
-                timeout=self.timeout
-            )
-            response.raise_for_status()
-            response_json = response.json()
-            image_url = response_json.get("url")
-            if image_url:
-                self.last_response = {"text": image_url}
-                self.conversation.update_chat_history(prompt, image_url)
-                return {"text": image_url}
-            raise exceptions.FailedToGenerateResponseError("No image URL found in response")
-        except requests.RequestException as e:
-            raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
-
-    def _handle_chat_request(self, project_id: str, prompt: str) -> Dict[str, Any]:
-        max_retries = 3
-        retry_count = 0
-        last_error = None
-
-        while retry_count < max_retries:
-            try:
-                data = {
-                    "project_id": project_id,
-                    "messages": [{"role": "user", "content": prompt}]
-                }
-                response = self.session.post(
-                    self.chat_api_endpoint,
-                    json=data,
-                    timeout=self.timeout
-                )
-
-                if response.status_code == 429:
-                    last_error = exceptions.FailedToGenerateResponseError(
-                        f"Rate limit exceeded: {response.text}"
-                    )
-                    retry_count += 1
-                    if retry_count < max_retries:
-                        continue
-                    raise last_error
-
-                response.raise_for_status()
-                response_json = response.json()
-                content = response_json.get("content", "")
-
-                self.last_response = {"text": content}
-                self.conversation.update_chat_history(prompt, content)
-                return {"text": content.strip()}
-
-            except requests.RequestException as e:
-                if "Rate limit exceeded" in str(e) and retry_count < max_retries:
-                    retry_count += 1
-                else:
-                    raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
-
-        raise last_error or exceptions.FailedToGenerateResponseError("Max retries exceeded")
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        return self.get_message(
-            self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
-        )
-
-    def get_message(self, response: dict) -> str:
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-
-if __name__ == "__main__":
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in WebSim.AVAILABLE_MODELS:
-        try:
-            test_ai = WebSim(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word")
-
-            if response and len(response.strip()) > 0:
-                status = "✓"
-                # Clean and truncate response
-                clean_text = response.strip().encode('utf-8', errors='ignore').decode('utf-8')
-                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"\r{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
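WebSim was the dual-mode provider in this batch: picking a model from image_models ('flux') routed ask() to the image-generation endpoint and returned an image URL in the "text" field, while the Gemini model ids used the chat endpoint. A sketch of both modes, again assuming an 8.2.2 install and a live websim.ai backend:

from webscout.Provider.WebSim import WebSim  # import path removed in 8.2.7

# Chat mode: any Gemini id from AVAILABLE_MODELS
chat_ai = WebSim(model="gemini-1.5-pro", timeout=60)
print(chat_ai.chat("Say 'Hello' in one word"))

# Image mode: 'flux' puts an image URL in the "text" field
img_ai = WebSim(model="flux", aspect_ratio="16:9")
print(img_ai.chat("a lighthouse at dusk"))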