webscout 8.2.7__py3-none-any.whl → 8.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout has been flagged as potentially problematic; see the registry's advisory page for this release for more details.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -249
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Litlogger/__init__.py +67 -0
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +23 -0
- webscout/Litlogger/core/logger.py +165 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +33 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +173 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +249 -0
- webscout/Litlogger/styles/formats.py +458 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +153 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +359 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +228 -0
- webscout/Provider/AISEARCH/genspark_search.py +350 -0
- webscout/Provider/AISEARCH/hika_search.py +198 -0
- webscout/Provider/AISEARCH/iask_search.py +436 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +324 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +673 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +370 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +285 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +36 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +488 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +319 -0
- webscout/Provider/OPENAI/e2b.py +1356 -0
- webscout/Provider/OPENAI/exaai.py +411 -0
- webscout/Provider/OPENAI/exachat.py +443 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +323 -0
- webscout/Provider/OPENAI/groq.py +361 -0
- webscout/Provider/OPENAI/heckai.py +307 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +383 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +356 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/scirachat.py +471 -0
- webscout/Provider/OPENAI/sonus.py +303 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -0
- webscout/Provider/OPENAI/uncovrAI.py +462 -0
- webscout/Provider/OPENAI/utils.py +307 -0
- webscout/Provider/OPENAI/venice.py +425 -0
- webscout/Provider/OPENAI/wisecat.py +381 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +378 -0
- webscout/Provider/OPENAI/yep.py +356 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/ImgSys/__init__.py +23 -0
- webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/__init__.py +12 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +155 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/piclumen/__init__.py +23 -0
- webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
- webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +9 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +280 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +172 -0
- webscout/Provider/ai4chat.py +149 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/scout/README.md +402 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +140 -0
- webscout/scout/core/scout.py +568 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +460 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +262 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
- webscout-8.2.8.dist-info/RECORD +334 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,266 @@
|
|
|
1
|
+
from curl_cffi.requests import Session
|
|
2
|
+
from curl_cffi import CurlError
|
|
3
|
+
import json
|
|
4
|
+
|
|
5
|
+
from webscout.AIutel import Optimizers
|
|
6
|
+
from webscout.AIutel import Conversation
|
|
7
|
+
from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
|
|
8
|
+
from webscout.AIbase import Provider
|
|
9
|
+
from webscout import exceptions
|
|
10
|
+
from typing import Optional, Union, Any, AsyncGenerator, Dict
|
|
11
|
+
from webscout.litagent import LitAgent
|
|
12
|
+
|
|
13
|
+
class TurboSeek(Provider):
    """Provider for the TurboSeek answer API (https://www.turboseek.io).

    Streams answers from the ``/api/getAnswer`` endpoint over a
    ``curl_cffi`` session with browser impersonation.
    """

    AVAILABLE_MODELS = ["Llama 3.1 70B"]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "Llama 3.1 70B"  # Note: model parameter is not used by the API endpoint
    ):
        """Instantiates TurboSeek

        Args:
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): Http request proxies. Defaults to None (no proxies).
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
            model (str, optional): Accepted for interface parity; not sent to the API.
        """
        # curl_cffi session gives us a realistic browser TLS fingerprint.
        self.session = Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.chat_endpoint = "https://www.turboseek.io/api/getAnswer"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.headers = {
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://www.turboseek.io",
            "priority": "u=1, i",
            "referer": "https://www.turboseek.io/?ref=taaft&utm_source=taaft&utm_medium=referral",
            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": LitAgent().random(),
        }

        # Materialize as a tuple: a bare generator expression here would be
        # exhausted after the first membership test in ask(), making every
        # later optimizer lookup fail (and the error message would print a
        # generator repr instead of the available names).
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        # Update curl_cffi session headers and proxies
        self.session.headers.update(self.headers)
        self.session.proxies = proxies or {}  # None sentinel avoids a shared mutable default
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    @staticmethod
    def _turboseek_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extracts the ``text`` field from a TurboSeek stream JSON object."""
        if isinstance(chunk, dict) and "text" in chunk:
            return chunk.get("text")  # json.loads already handles unicode escapes
        return None

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> dict:
        """Chat with AI

        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            dict : {}
            ```json
            {
                "text" : "How may I assist you today?"
            }
            ```
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        payload = {
            "question": conversation_prompt,
            "sources": []
        }

        def for_stream():
            try:
                # impersonate makes the request look like a real Chrome client.
                response = self.session.post(
                    self.chat_endpoint,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome120",
                )
                if not response.ok:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                    )

                streaming_text = ""
                # sanitize_stream strips the "data:" prefix, parses each event
                # as JSON and applies the extractor to pull out the text.
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),  # pass byte iterator
                    intro_value="data:",
                    to_json=True,  # stream sends JSON events
                    content_extractor=self._turboseek_extractor,
                    yield_raw_on_error=False  # skip non-JSON lines / extractor misses
                )

                for content_chunk in processed_stream:
                    # content_chunk is the string extracted by _turboseek_extractor
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        self.last_response.update(dict(text=streaming_text))  # incremental update
                        yield dict(text=content_chunk) if not raw else content_chunk

                # Persist history only once the stream has produced content.
                if streaming_text:
                    self.conversation.update_chat_history(
                        prompt, streaming_text  # fully aggregated text
                    )
            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")

        def for_non_stream():
            # Drain the stream, aggregating chunks into one response dict.
            full_text = ""
            try:
                for chunk_data in for_stream():
                    if isinstance(chunk_data, dict) and "text" in chunk_data:
                        full_text += chunk_data["text"]
                    elif isinstance(chunk_data, str):  # raw=True path yields bare strings
                        full_text += chunk_data
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Failed to aggregate non-stream response: {e}") from e
            # Ensure last_response reflects the complete aggregated text.
            self.last_response = {"text": full_text}
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate response `str`
        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            str: Response generated
        """

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        # Unicode escapes are handled by json.loads within sanitize_stream.
        return response.get("text", "")
|
|
247
|
+
|
|
248
|
+
if __name__ == '__main__':
    # Manual smoke test: requires curl_cffi and network access.
    from rich import print
    try:
        ai = TurboSeek(timeout=60)
        print("[bold blue]Testing Stream:[/bold blue]")
        for chunk in ai.chat("yooooooooooo", stream=True):
            print(chunk, end="", flush=True)
    except exceptions.FailedToGenerateResponseError as e:
        print(f"\n[bold red]API Error:[/bold red] {e}")
    except Exception as e:
        print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")
|
|
@@ -0,0 +1,202 @@
|
|
|
1
|
+
from typing import Optional, Union, Any, Dict
|
|
2
|
+
import re
|
|
3
|
+
from uuid import uuid4
|
|
4
|
+
|
|
5
|
+
from webscout.AIutel import Optimizers
|
|
6
|
+
from webscout.AIutel import Conversation
|
|
7
|
+
from webscout.AIutel import AwesomePrompts, sanitize_stream
|
|
8
|
+
from webscout.AIbase import Provider
|
|
9
|
+
from webscout import exceptions
|
|
10
|
+
from webscout.litagent import LitAgent
|
|
11
|
+
from curl_cffi.requests import Session
|
|
12
|
+
from curl_cffi import CurlError
|
|
13
|
+
|
|
14
|
+
class TypefullyAI(Provider):
    """Provider for Typefully's AI completion endpoint.

    Streams completions from ``https://typefully.com/tools/ai/api/completion``,
    which emits chunks in the ``0:"..."`` line format.
    """

    AVAILABLE_MODELS = ["openai:gpt-4o-mini", "openai:gpt-4o", "anthropic:claude-3-5-haiku-20241022", "groq:llama-3.3-70b-versatile"]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        system_prompt: str = "You're a helpful assistant.",
        model: str = "openai:gpt-4o-mini",
    ):
        """Instantiates TypefullyAI.

        Args:
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
            max_tokens (int, optional): Maximum output length requested from the API. Defaults to 600.
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): Http request proxies. Defaults to None (no proxies).
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str|int, optional): Awesome prompt key or index (used as intro). Defaults to None.
            system_prompt (str, optional): System prompt sent with every request.
            model (str, optional): Model identifier; one of AVAILABLE_MODELS.
        """
        self.session = Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://typefully.com/tools/ai/api/completion"
        self.timeout = timeout
        self.last_response = {}
        self.system_prompt = system_prompt
        self.model = model
        self.output_length = max_tokens
        self.agent = LitAgent()
        self.headers = {
            "authority": "typefully.com",
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://typefully.com",
            "referer": "https://typefully.com/tools/ai/chat-gpt-alternative",
            "sec-ch-ua": '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "user-agent": self.agent.random()
        }
        # Materialize as a tuple: a bare generator expression here would be
        # exhausted after the first membership test in ask(), breaking every
        # later optimizer lookup.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.session.headers.update(self.headers)
        self.session.proxies = proxies or {}  # None sentinel avoids a shared mutable default
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    @staticmethod
    def _typefully_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extracts content from Typefully's ``0:"..."`` stream chunk format.

        Returns the decoded text, or None when the chunk carries no content.
        """
        if isinstance(chunk, str):
            # (The previous nested isinstance(chunk, bytes) check was dead
            # code inside this str branch and has been removed.)
            match = re.search(r'0:"(.*?)"', chunk)
            if match:
                # Decode escape sequences (\n, \uXXXX, ...) embedded in the chunk.
                content = match.group(1).encode().decode('unicode_escape')
                # Remaining escaped quotes (from doubly-escaped input) become plain quotes.
                return content.replace('\\"', '"')
        return None

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any]:
        """Chat with the API.

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Yield raw text chunks instead of dicts. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.

        Returns:
            Dict[str, Any]: ``{"text": "..."}`` (or a generator of such dicts when streaming).
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )
        payload = {
            "prompt": conversation_prompt,
            "systemPrompt": self.system_prompt,
            "modelIdentifier": self.model,
            "outputLength": self.output_length
        }

        def for_stream():
            try:
                response = self.session.post(
                    self.api_endpoint,
                    headers=self.headers,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome120"
                )
                if not response.ok:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                    )
                streaming_text = ""
                # Stream is not JSON-per-line; the extractor parses the 0:"..." format.
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),
                    intro_value=None,
                    to_json=False,
                    content_extractor=self._typefully_extractor,
                )
                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        yield content_chunk if raw else dict(text=content_chunk)
                # Record the aggregated text and persist conversation history.
                self.last_response.update(dict(text=streaming_text))
                self.conversation.update_chat_history(
                    prompt, self.get_message(self.last_response)
                )
            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e}")

        def for_non_stream():
            # Drain the stream; last_response is filled in by for_stream().
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate a response as `str` (or a generator of `str` when streaming)."""
        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )
        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves the message text from a response dict produced by `self.ask`."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        text = response.get("text", "")
        try:
            # Convert literal "\n" escape sequences into real newlines.
            # (The previous second replace of '\\n\\n' was a no-op after this
            # one and has been removed.)
            return text.replace('\\n', '\n')
        except Exception:
            return text
|
|
179
|
+
|
|
180
|
+
if __name__ == "__main__":
    # Smoke-test each advertised model and print a one-line status per model.
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)
    working = 0
    total = len(TypefullyAI.AVAILABLE_MODELS)
    for model in TypefullyAI.AVAILABLE_MODELS:
        try:
            test_ai = TypefullyAI(model=model, timeout=60)
            # Aggregate the streamed chunks into one string.
            response_text = "".join(
                test_ai.chat("Say 'Hello' in one word", stream=True)
            )
            if response_text and len(response_text.strip()) > 0:
                status = "OK"
                clean_text = response_text.strip()
                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
            else:
                status = "FAIL (Stream)"
                display_text = "Empty or invalid stream response"
            print(f"\r{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"\r{model:<50} {'FAIL':<10} {str(e)}")
|