webscout 8.2.7__py3-none-any.whl → 8.2.8__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -249
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Litlogger/__init__.py +67 -0
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +23 -0
- webscout/Litlogger/core/logger.py +165 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +33 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +173 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +249 -0
- webscout/Litlogger/styles/formats.py +458 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +153 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +359 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +228 -0
- webscout/Provider/AISEARCH/genspark_search.py +350 -0
- webscout/Provider/AISEARCH/hika_search.py +198 -0
- webscout/Provider/AISEARCH/iask_search.py +436 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +324 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +673 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +370 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +285 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +36 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +488 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +319 -0
- webscout/Provider/OPENAI/e2b.py +1356 -0
- webscout/Provider/OPENAI/exaai.py +411 -0
- webscout/Provider/OPENAI/exachat.py +443 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +323 -0
- webscout/Provider/OPENAI/groq.py +361 -0
- webscout/Provider/OPENAI/heckai.py +307 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +383 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +356 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/scirachat.py +471 -0
- webscout/Provider/OPENAI/sonus.py +303 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -0
- webscout/Provider/OPENAI/uncovrAI.py +462 -0
- webscout/Provider/OPENAI/utils.py +307 -0
- webscout/Provider/OPENAI/venice.py +425 -0
- webscout/Provider/OPENAI/wisecat.py +381 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +378 -0
- webscout/Provider/OPENAI/yep.py +356 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/ImgSys/__init__.py +23 -0
- webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/__init__.py +12 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +155 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/piclumen/__init__.py +23 -0
- webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
- webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +9 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +280 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +172 -0
- webscout/Provider/ai4chat.py +149 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/scout/README.md +402 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +140 -0
- webscout/scout/core/scout.py +568 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +460 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +262 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
- webscout-8.2.8.dist-info/RECORD +334 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
webscout/Provider/PI.py
ADDED
@@ -0,0 +1,429 @@
+from uuid import uuid4
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+import json
+import re
+import threading
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from typing import Dict, Union, Any, Optional
+from webscout.litagent import LitAgent
+from webscout import exceptions
+
+class PiAI(Provider):
+    """
+    PiAI is a provider class for interacting with the Pi.ai chat API.
+
+    Attributes:
+        knowledge_cutoff (str): The knowledge cutoff date for the model
+        AVAILABLE_VOICES (Dict[str, int]): Available voice options for audio responses
+        AVAILABLE_MODELS (List[str]): Available model options for the API
+    """
+    AVAILABLE_MODELS = ["inflection_3_pi"]
+    AVAILABLE_VOICES: Dict[str, int] = {
+        "voice1": 1,
+        "voice2": 2,
+        "voice3": 3,
+        "voice4": 4,
+        "voice5": 5,
+        "voice6": 6,
+        "voice7": 7,
+        "voice8": 8
+    }
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048, # Note: max_tokens is not used by this API
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        voice: bool = False,
+        voice_name: str = "voice3",
+        output_file: str = "PiAI.mp3",
+        model: str = "inflection_3_pi", # Note: model is not used by this API
+    ):
+        """
+        Initializes PiAI with voice support.
+
+        Args:
+            voice (bool): Enable/disable voice output
+            voice_name (str): Name of the voice to use (if None, uses default)
+            output_file (str): Path to save voice output (default: PiAI.mp3)
+        """
+        # Voice settings
+        self.voice_enabled = voice
+        self.voice_name = voice_name
+        self.output_file = output_file
+
+        if voice and voice_name and voice_name not in self.AVAILABLE_VOICES:
+            raise ValueError(f"Voice '{voice_name}' not available. Choose from: {list(self.AVAILABLE_VOICES.keys())}")
+
+        # Initialize curl_cffi Session instead of cloudscraper/requests
+        self.session = Session()
+        self.primary_url = 'https://pi.ai/api/chat'
+        self.fallback_url = 'https://pi.ai/api/v2/chat'
+        self.url = self.primary_url
+        self.headers = {
+            'Accept': 'text/event-stream',
+            'Accept-Encoding': 'gzip, deflate, br, zstd',
+            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
+            'Content-Type': 'application/json',
+            'DNT': '1',
+            'Origin': 'https://pi.ai',
+            'Referer': 'https://pi.ai/talk',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-origin',
+            'User-Agent': LitAgent().random(),
+            'X-Api-Version': '3'
+        }
+        self.cookies = {
+            '__cf_bm': uuid4().hex
+        }
+
+        # Update curl_cffi session headers, proxies, and cookies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
+        # Set cookies on the session object for curl_cffi
+        for name, value in self.cookies.items():
+            self.session.cookies.set(name, value)
+
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {} if self.is_conversation else {'text': ""}
+        self.conversation_id = None
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        # Setup conversation
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            ) if act else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+        if self.is_conversation:
+            self.start_conversation()
+
+    @staticmethod
+    def _pi_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts text content from PiAI stream JSON objects."""
+        if isinstance(chunk, dict) and 'text' in chunk and chunk['text'] is not None:
+            return chunk.get("text")
+        return None
+
+    def start_conversation(self) -> str:
+        """
+        Initializes a new conversation and returns the conversation ID.
+        """
+        try:
+            # Use curl_cffi session post with impersonate
+            # Cookies are handled by the session
+            response = self.session.post(
+                "https://pi.ai/api/chat/start",
+                # headers are set on the session
+                # cookies=self.cookies, # Handled by session
+                json={},
+                timeout=self.timeout,
+                # proxies are set on the session
+                impersonate="chrome110" # Use a common impersonation profile
+            )
+            response.raise_for_status() # Check for HTTP errors
+
+            data = response.json()
+            # Ensure the expected structure before accessing
+            if 'conversations' in data and data['conversations'] and 'sid' in data['conversations'][0]:
+                self.conversation_id = data['conversations'][0]['sid']
+                return self.conversation_id
+            else:
+                raise exceptions.FailedToGenerateResponseError(f"Unexpected response structure from start API: {data}")
+
+        except CurlError as e: # Catch CurlError
+            raise exceptions.FailedToGenerateResponseError(f"Failed to start conversation (CurlError): {e}") from e
+        except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
+            err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+            raise exceptions.FailedToGenerateResponseError(f"Failed to start conversation ({type(e).__name__}): {e} - {err_text}") from e
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        voice: bool = None,
+        voice_name: str = None,
+        output_file: str = None
+    ) -> dict:
+        """
+        Interact with Pi.ai by sending a prompt and receiving a response.
+
+        Args:
+            prompt (str): The prompt to send
+            stream (bool): Whether to stream the response
+            raw (bool): Return raw response format
+            optimizer (str): Prompt optimizer to use
+            conversationally (bool): Use conversation context
+            voice (bool): Override default voice setting
+            voice_name (str): Override default voice name
+            output_file (str): Override default output file path
+        """
+        # Voice configuration
+        voice = self.voice_enabled if voice is None else voice
+        voice_name = self.voice_name if voice_name is None else voice_name
+        output_file = self.output_file if output_file is None else output_file
+
+        if voice and voice_name and voice_name not in self.AVAILABLE_VOICES:
+            raise ValueError(f"Voice '{voice_name}' not available. Choose from: {list(self.AVAILABLE_VOICES.keys())}")
+
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        data = {
+            'text': conversation_prompt,
+            'conversation': self.conversation_id
+        }
+
+        def process_stream():
+            try: # Add outer try block for error handling
+                # Try primary URL first
+                current_url = self.url
+                response = self.session.post(
+                    current_url,
+                    # headers are set on the session
+                    # cookies are handled by the session
+                    json=data,
+                    stream=True,
+                    timeout=self.timeout,
+                    # proxies are set on the session
+                    impersonate="chrome110" # Use a common impersonation profile
+                )
+
+                # If primary URL fails, try fallback URL
+                if not response.ok and current_url == self.primary_url:
+                    current_url = self.fallback_url
+                    response = self.session.post(
+                        current_url,
+                        # headers are set on the session
+                        # cookies are handled by the session
+                        json=data,
+                        stream=True,
+                        timeout=self.timeout,
+                        # proxies are set on the session
+                        impersonate="chrome110" # Use a common impersonation profile
+                    )
+
+                response.raise_for_status() # Check for HTTP errors after potential fallback
+
+                # --- Process response content ---
+                # Note: curl_cffi's response.content might behave differently for streams.
+                # It's often better to iterate directly.
+                # output_str = response.content.decode('utf-8') # Avoid reading full content at once for streams
+
+                sids = []
+                streaming_text = ""
+                full_raw_data_for_sids = "" # Accumulate raw data to find SIDs later
+
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
+                        line = line_bytes.decode('utf-8')
+                        full_raw_data_for_sids += line + "\n" # Accumulate for SID extraction
+
+                        if line.startswith("data: "):
+                            json_line_str = line[6:] # Get the JSON part as string
+                            try:
+                                # Process this single JSON line string with sanitize_stream
+                                processed_gen = sanitize_stream(
+                                    data=json_line_str,
+                                    to_json=True,
+                                    content_extractor=self._pi_extractor
+                                )
+                                chunk_text = next(processed_gen, None) # Get the single extracted text item
+                                if chunk_text and isinstance(chunk_text, str):
+                                    streaming_text += chunk_text
+                                    yield {"text": streaming_text} # Always yield dict with aggregated text
+                            except (StopIteration, json.JSONDecodeError, UnicodeDecodeError):
+                                continue # Skip if sanitize_stream fails or yields nothing
+                # Extract SIDs after processing the stream
+                sids = re.findall(r'"sid":"(.*?)"', full_raw_data_for_sids)
+                second_sid = sids[1] if len(sids) >= 2 else None
+
+                if voice and voice_name and second_sid:
+                    threading.Thread(
+                        target=self.download_audio_threaded,
+                        args=(voice_name, second_sid, output_file)
+                    ).start()
+
+                # Update history and last response after stream finishes
+                self.last_response = dict(text=streaming_text)
+                self.conversation.update_chat_history(
+                    prompt, streaming_text
+                )
+
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"API request failed (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"API request failed ({type(e).__name__}): {e} - {err_text}") from e
+
+
+        if stream:
+            return process_stream()
+        else:
+            # For non-stream, collect all responses and return the final one
+            final_text = ""
+            # process_stream always yields dicts now
+            for res in process_stream():
+                if isinstance(res, dict) and "text" in res:
+                    final_text = res["text"] # Keep updating with the latest aggregated text
+
+            # last_response and history are updated within process_stream
+            # Return the final aggregated response dict or raw text
+            return final_text if raw else self.last_response
+
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        voice: bool = None,
+        voice_name: str = None,
+        output_file: str = None
+    ) -> str:
+        """
+        Generates a response based on the provided prompt.
+
+        Args:
+            prompt (str): The prompt to send
+            stream (bool): Whether to stream the response
+            optimizer (str): Prompt optimizer to use
+            conversationally (bool): Use conversation context
+            voice (bool): Override default voice setting
+            voice_name (str): Override default voice name
+            output_file (str): Override default output file path
+        """
+        # Use instance defaults if not specified
+        voice = self.voice_enabled if voice is None else voice
+        voice_name = self.voice_name if voice_name is None else voice_name
+        output_file = self.output_file if output_file is None else output_file
+
+        if voice and voice_name and voice_name not in self.AVAILABLE_VOICES:
+            raise ValueError(f"Voice '{voice_name}' not available. Choose from: {list(self.AVAILABLE_VOICES.keys())}")
+
+        if stream:
+            def stream_generator():
+                # ask() yields dicts or raw JSON objects when streaming
+                gen = self.ask(
+                    prompt,
+                    stream=True,
+                    raw=False, # Ensure ask yields dicts for get_message
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                    voice=voice,
+                    voice_name=voice_name,
+                    output_file=output_file
+                )
+                for response_dict in gen:
+                    # get_message expects dict
+                    yield self.get_message(response_dict)
+            return stream_generator()
+        else:
+            # ask() returns dict or raw text when not streaming
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False, # Ensure ask returns dict for get_message
+                optimizer=optimizer,
+                conversationally=conversationally,
+                voice=voice,
+                voice_name=voice_name,
+                output_file=output_file
+            )
+            # get_message expects dict
+            return self.get_message(response_data)
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response"""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+    def download_audio_threaded(self, voice_name: str, second_sid: str, output_file: str) -> None:
+        """Downloads audio in a separate thread."""
+        params = {
+            'mode': 'eager',
+            'voice': f'voice{self.AVAILABLE_VOICES[voice_name]}',
+            'messageSid': second_sid,
+        }
+
+        try:
+            # Use curl_cffi session get with impersonate
+            audio_response = self.session.get(
+                'https://pi.ai/api/chat/voice',
+                params=params,
+                # cookies are handled by the session
+                # headers are set on the session
+                timeout=self.timeout,
+                # proxies are set on the session
+                impersonate="chrome110" # Use a common impersonation profile
+            )
+            audio_response.raise_for_status() # Check for HTTP errors
+
+            with open(output_file, "wb") as file:
+                file.write(audio_response.content)
+
+        except CurlError: # Catch CurlError
+            # Optionally log the error
+            pass
+        except Exception: # Catch other potential exceptions
+            # Optionally log the error
+            pass
+
+if __name__ == '__main__':
+    # Ensure curl_cffi is installed
+    from rich import print
+    try: # Add try-except block for testing
+        ai = PiAI(timeout=60)
+        print("[bold blue]Testing Chat (Stream):[/bold blue]")
+        response = ai.chat(input(">>> "), stream=True)
+        full_response = ""
+        for chunk in response:
+            print(chunk, end="", flush=True)
+            full_response += chunk
+        print("\n[bold green]Stream Test Complete.[/bold green]")
+
+        # Optional: Test non-stream
+        # print("\n[bold blue]Testing Chat (Non-Stream):[/bold blue]")
+        # response_non_stream = ai.chat("Hello again", stream=False)
+        # print(response_non_stream)
+        # print("[bold green]Non-Stream Test Complete.[/bold green]")
+
+    except exceptions.FailedToGenerateResponseError as e:
+        print(f"\n[bold red]API Error:[/bold red] {e}")
+    except Exception as e:
+        print(f"\n[bold red]An unexpected error occurred:[/bold red] {e}")
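For orientation, a minimal usage sketch of the new provider follows. It is not part of the release contents above; it assumes the import path webscout.Provider.PI matching the file path in the listing, and that network access to pi.ai is available. Note that, per the ask() implementation, each streamed chunk is the full response text accumulated so far rather than a delta.

# Usage sketch (assumption: PiAI is importable from webscout.Provider.PI as added above).
from webscout.Provider.PI import PiAI

ai = PiAI(timeout=60)  # __init__ starts a Pi.ai conversation when is_conversation=True

reply = ""
for chunk in ai.chat("Hello, Pi!", stream=True):
    reply = chunk  # each chunk is the aggregated text generated so far
print(reply)

# Non-streaming call returns the final text directly.
print(ai.chat("Give me a one-line fun fact."))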