webscout 8.2.7__py3-none-any.whl → 8.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -249
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Litlogger/__init__.py +67 -0
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +23 -0
- webscout/Litlogger/core/logger.py +165 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +33 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +173 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +249 -0
- webscout/Litlogger/styles/formats.py +458 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +153 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +359 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +228 -0
- webscout/Provider/AISEARCH/genspark_search.py +350 -0
- webscout/Provider/AISEARCH/hika_search.py +198 -0
- webscout/Provider/AISEARCH/iask_search.py +436 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +324 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +673 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +370 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +285 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +36 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +488 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +319 -0
- webscout/Provider/OPENAI/e2b.py +1356 -0
- webscout/Provider/OPENAI/exaai.py +411 -0
- webscout/Provider/OPENAI/exachat.py +443 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +323 -0
- webscout/Provider/OPENAI/groq.py +361 -0
- webscout/Provider/OPENAI/heckai.py +307 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +383 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +356 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/scirachat.py +471 -0
- webscout/Provider/OPENAI/sonus.py +303 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -0
- webscout/Provider/OPENAI/uncovrAI.py +462 -0
- webscout/Provider/OPENAI/utils.py +307 -0
- webscout/Provider/OPENAI/venice.py +425 -0
- webscout/Provider/OPENAI/wisecat.py +381 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +378 -0
- webscout/Provider/OPENAI/yep.py +356 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/ImgSys/__init__.py +23 -0
- webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/__init__.py +12 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +155 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/piclumen/__init__.py +23 -0
- webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
- webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +9 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +280 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +172 -0
- webscout/Provider/ai4chat.py +149 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/scout/README.md +402 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +140 -0
- webscout/scout/core/scout.py +568 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +460 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +262 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
- webscout-8.2.8.dist-info/RECORD +334 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,220 @@ webscout/Provider/asksteve.py (new file)
+from typing import Any, Dict, Optional, Union
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
+from webscout import exceptions
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
+from webscout.AIbase import Provider
+from webscout.litagent import LitAgent
+
+class AskSteve(Provider):
+    """
+    A class to interact with the AskSteve API.
+    """
+    AVAILABLE_MODELS = ["Gemini"]
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+    ) -> None:
+        """Instantiates AskSteve
+
+        Args:
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
+            timeout (int, optional): Http request timeout. Defaults to 30.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            proxies (dict, optional): Http request proxies. Defaults to {}.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            system_prompt (str, optional): System prompt for AskSteve. Defaults to the provided string.
+        """
+        self.session = Session() # Use curl_cffi Session
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://quickstart.asksteve.to/quickStartRequest"
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            "accept": "*/*",
+            "accept-encoding": "gzip, deflate, br, zstd",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "text/plain;charset=UTF-8",
+            "origin": "chrome-extension://gldebcpkoojijledacjeboaehblhfbjg",
+            "priority": "u=1, i",
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "none",
+            "sec-fetch-storage-access": "active",
+            "user-agent": LitAgent().random(),
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies # Assign proxies directly
+    @staticmethod
+    def _asksteve_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from AskSteve JSON response."""
+        if isinstance(chunk, dict) and "candidates" in chunk and len(chunk["candidates"]) > 0:
+            parts = chunk["candidates"][0].get("content", {}).get("parts", [])
+            if parts and isinstance(parts[0].get("text"), str):
+                return parts[0]["text"]
+        return None
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+           dict : {}
+        ```json
+        {
+           "text" : "How may I assist you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "key": "asksteve",
+            "prompt": conversation_prompt
+        }
+
+
+        # This API doesn't stream, so we process the full response
+        try:
+            response = self.session.post(
+                self.api_endpoint,
+                headers=self.headers,
+                json=payload,
+                stream=False, # API doesn't stream
+                timeout=self.timeout,
+                impersonate="chrome120" # Add impersonate
+            )
+            response.raise_for_status()
+            response_text_raw = response.text # Get raw text
+
+            # Process the full JSON text using sanitize_stream
+            processed_stream = sanitize_stream(
+                data=response_text_raw,
+                to_json=True, # Parse the whole text as JSON
+                intro_value=None,
+                content_extractor=self._asksteve_extractor, # Use the specific extractor
+                yield_raw_on_error=False
+            )
+            # Extract the single result
+            text = next(processed_stream, None)
+            text = text if isinstance(text, str) else "" # Ensure it's a string
+
+            self.last_response.update(dict(text=text))
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+            # Always return a dict for consistency
+            return {"text": text} if raw else self.last_response
+
+        except CurlError as e:
+            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+        except Exception as e: # Catch other potential errors
+            raise exceptions.FailedToGenerateResponseError(f"Failed to get response ({type(e).__name__}): {e}") from e
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        response_data = self.ask(
+            prompt,
+            stream=False, # Always False for this API
+            raw=False, # Get the dict back
+            optimizer=optimizer,
+            conversationally=conversationally,
+        )
+        if stream:
+            def stream_wrapper():
+                yield self.get_message(response_data)
+            return stream_wrapper()
+        else:
+            return self.get_message(response_data)
+
+    def get_message(self, response) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict or str): Response generated by `self.ask` or a string
+
+        Returns:
+            str: Message extracted
+        """
+        if isinstance(response, dict):
+            return response.get("text", "") # Use .get for safety
+        elif isinstance(response, str):
+            return response
+        else:
+            raise TypeError(f"Unsupported response type: {type(response)}")
+
+
+if __name__ == "__main__":
+    from rich import print
+    ai = AskSteve()
+    response = ai.chat("write a short poem about AI", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
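The extractor above expects a Gemini-style response shape (`candidates[0].content.parts[0].text`), and the provider exposes the same `ask` / `chat` / `get_message` surface as the other webscout providers; because the endpoint does not stream, `stream=True` just wraps the single full reply in a one-item generator. Below is a minimal usage sketch, assuming webscout 8.2.8 is installed and importing the module directly by the path in the file list above (`webscout/Provider/asksteve.py`); the prompts are arbitrary examples.

```python
# Minimal usage sketch for the new AskSteve provider (non-streaming).
# Assumes webscout 8.2.8 is installed; the import path mirrors the file
# listing above (webscout/Provider/asksteve.py).
from webscout.Provider.asksteve import AskSteve

ai = AskSteve(timeout=60)

# chat() always issues a single request (the API does not stream);
# with stream=False it returns the full reply as a plain string.
reply = ai.chat("Summarize what a context manager does in Python.")
print(reply)

# ask() returns the dict form, e.g. {"text": "..."}; get_message() unwraps it.
data = ai.ask("Name three Python web frameworks.")
print(ai.get_message(data))
```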
@@ -0,0 +1,290 @@ webscout/Provider/cerebras.py (new file)
+
+import re
+import curl_cffi
+from curl_cffi.requests import Session
+import json
+import os
+from typing import Any, Dict, Optional, Generator, List, Union
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent as UserAgent
+
+class Cerebras(Provider):
+    """
+    A class to interact with the Cerebras API using a cookie for authentication.
+    """
+
+    AVAILABLE_MODELS = [
+        "llama3.1-8b",
+        "llama-3.3-70b",
+        "deepseek-r1-distill-llama-70b",
+        "llama-4-scout-17b-16e-instruct",
+        "qwen-3-32b",
+
+
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        cookie_path: str = "cookie.json",
+        model: str = "llama3.1-8b",
+        system_prompt: str = "You are a helpful assistant.",
+    ):
+        # Validate model choice
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(
+                f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
+            )
+
+        # Initialize basic settings first
+        self.timeout = timeout
+        self.model = model
+        self.system_prompt = system_prompt
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.last_response = {}
+
+        self.session = Session() # Initialize curl_cffi session
+
+        # Get API key first
+        try:
+            self.api_key = self.get_demo_api_key(cookie_path)
+        except Exception as e:
+            raise exceptions.APIConnectionError(f"Failed to initialize Cerebras client: {e}")
+
+        # Initialize optimizers
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        # Initialize conversation settings
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else None
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        # Apply proxies to the session
+        self.session.proxies = proxies
+
+    # Rest of the class implementation remains the same...
+    @staticmethod
+    def extract_query(text: str) -> str:
+        """Extracts the first code block from the given text."""
+        pattern = r"```(.*?)```"
+        matches = re.findall(pattern, text, re.DOTALL)
+        return matches[0].strip() if matches else text.strip()
+
+    @staticmethod
+    def refiner(text: str) -> str:
+        """Refines the input text by removing surrounding quotes."""
+        return text.strip('"')
+
+    @staticmethod
+    def _cerebras_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Cerebras stream JSON objects."""
+        if isinstance(chunk, dict):
+            return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
+        return None
+
+    def get_demo_api_key(self, cookie_path: str) -> str: # Keep this using requests or switch to curl_cffi
+        """Retrieves the demo API key using the provided cookie."""
+        try:
+            with open(cookie_path, "r") as file:
+                cookies = {item["name"]: item["value"] for item in json.load(file)}
+        except FileNotFoundError:
+            raise FileNotFoundError(f"Cookie file not found at path: {cookie_path}")
+        except json.JSONDecodeError:
+            raise json.JSONDecodeError("Invalid JSON format in the cookie file.", "", 0)
+
+        headers = {
+            "Accept": "*/*",
+            "Accept-Language": "en-US,en;q=0.9",
+            "Content-Type": "application/json",
+            "Origin": "https://inference.cerebras.ai",
+            "Referer": "https://inference.cerebras.ai/",
+            "user-agent": UserAgent().random(),
+        }
+
+        json_data = {
+            "operationName": "GetMyDemoApiKey",
+            "variables": {},
+            "query": "query GetMyDemoApiKey {\n GetMyDemoApiKey\n}",
+        }
+
+        try:
+            # Use the initialized curl_cffi session
+            response = self.session.post(
+                "https://inference.cerebras.ai/api/graphql",
+                cookies=cookies,
+                headers=headers,
+                json=json_data,
+                timeout=self.timeout,
+                impersonate="chrome120" # Add impersonate
+            )
+            response.raise_for_status()
+            api_key = response.json().get("data", {}).get("GetMyDemoApiKey")
+            return api_key
+        except curl_cffi.CurlError as e:
+            raise exceptions.APIConnectionError(f"Failed to retrieve API key: {e}")
+        except KeyError:
+            raise exceptions.InvalidResponseError("API key not found in response.")
+
+    def _make_request(self, messages: List[Dict], stream: bool = False) -> Union[Dict, Generator]:
+        """Make a request to the Cerebras API."""
+        headers = {
+            "Authorization": f"Bearer {self.api_key}",
+            "Content-Type": "application/json",
+            "User-Agent": UserAgent().random(),
+        }
+
+        payload = {
+            "model": self.model,
+            "messages": messages,
+            "stream": stream
+        }
+
+        try:
+            # Use the initialized curl_cffi session
+            response = self.session.post(
+                "https://api.cerebras.ai/v1/chat/completions",
+                headers=headers,
+                json=payload,
+                stream=stream,
+                timeout=self.timeout,
+                impersonate="chrome120" # Add impersonate
+            )
+            response.raise_for_status()
+
+            if stream:
+                def generate_stream():
+                    # Use sanitize_stream
+                    processed_stream = sanitize_stream(
+                        data=response.iter_content(chunk_size=None), # Pass byte iterator
+                        intro_value="data:",
+                        to_json=True, # Stream sends JSON
+                        content_extractor=self._cerebras_extractor, # Use the specific extractor
+                        yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
+                    )
+                    for content_chunk in processed_stream:
+                        if content_chunk and isinstance(content_chunk, str):
+                            yield content_chunk # Yield the extracted text chunk
+
+                return generate_stream()
+            else:
+                response_json = response.json()
+                # Extract content for non-streaming response
+                content = response_json.get("choices", [{}])[0].get("message", {}).get("content")
+                return content if content else "" # Return empty string if not found
+
+        except curl_cffi.CurlError as e:
+            raise exceptions.APIConnectionError(f"Request failed (CurlError): {e}") from e
+        except Exception as e: # Catch other potential errors
+            raise exceptions.APIConnectionError(f"Request failed: {e}")
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False, # Add raw parameter for consistency
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict, Generator]:
+        """Send a prompt to the model and get a response."""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        messages = [
+            {"role": "system", "content": self.system_prompt},
+            {"role": "user", "content": conversation_prompt}
+        ]
+
+        try:
+            response = self._make_request(messages, stream)
+
+            if stream:
+                # Wrap the generator to yield dicts or raw strings
+                def stream_wrapper():
+                    full_text = ""
+                    for chunk in response:
+                        full_text += chunk
+                        yield chunk if raw else {"text": chunk}
+                    # Update history after stream finishes
+                    self.last_response = {"text": full_text}
+                    self.conversation.update_chat_history(prompt, full_text)
+                return stream_wrapper()
+            else:
+                # Non-streaming response is already the full text string
+                self.last_response = {"text": response}
+                self.conversation.update_chat_history(prompt, response)
+                return self.last_response if not raw else response # Return dict or raw string
+
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Error during request: {e}")
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator]:
+        """Chat with the model."""
+        # Ask returns a generator for stream=True, dict/str for stream=False
+        response_gen_or_dict = self.ask(prompt, stream, raw=False, optimizer=optimizer, conversationally=conversationally)
+
+        if stream:
+            # Wrap the generator from ask() to get message text
+            def stream_wrapper():
+                for chunk_dict in response_gen_or_dict:
+                    yield self.get_message(chunk_dict)
+            return stream_wrapper()
+        else:
+            # Non-streaming response is already a dict
+            return self.get_message(response_gen_or_dict)
+
+    def get_message(self, response: str) -> str:
+        """Retrieves message from response."""
+        # Updated to handle dict input from ask()
+        assert isinstance(response, dict), "Response should be of dict data-type only for get_message"
+        return response.get("text", "")
+
+
+if __name__ == "__main__":
+    from rich import print
+
+    # Example usage
+    cerebras = Cerebras(
+        cookie_path=r'cookies.json',
+        model='llama3.1-8b',
+        system_prompt="You are a helpful AI assistant."
+    )
+
+    # Test with streaming
+    response = cerebras.chat("Hello!", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
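The `cookie_path` file read by `get_demo_api_key` is expected to be a JSON array of cookie objects carrying at least `name` and `value` keys (the shape produced by typical browser cookie-export tools), since the loader builds its cookie dict from exactly those two fields. Below is a minimal sketch of preparing such a file and running a non-streaming chat, assuming webscout 8.2.8 is installed and importing the module by the path in the file list above (`webscout/Provider/cerebras.py`); the cookie entry is a placeholder, not a real credential.

```python
# Sketch: write a cookie.json in the shape get_demo_api_key() expects,
# then run a non-streaming chat. The cookie entry below is a placeholder;
# real values would come from an authenticated inference.cerebras.ai session.
import json

from webscout.Provider.cerebras import Cerebras

cookies = [
    {"name": "example_cookie_name", "value": "example_cookie_value"},  # placeholder
]
with open("cookie.json", "w") as fh:
    json.dump(cookies, fh)

client = Cerebras(cookie_path="cookie.json", model="llama-3.3-70b")

# With stream=False, chat() returns the assistant reply as a single string.
print(client.chat("Explain tail-call optimization in one paragraph."))
```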