webscout 8.3.6__py3-none-any.whl → 2025.10.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -58
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +33 -11
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -314
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -315
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -342
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -207
- webscout/Provider/Deepinfra.py +370 -369
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -387
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -162
- webscout/Provider/GeminiProxy.py +167 -166
- webscout/Provider/GithubChat.py +371 -370
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -379
- webscout/Provider/Jadve.py +282 -297
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +206 -384
- webscout/Provider/LambdaChat.py +423 -425
- webscout/Provider/Nemotron.py +244 -245
- webscout/Provider/Netwrck.py +248 -247
- webscout/Provider/OLLAMA.py +395 -394
- webscout/Provider/OPENAI/Cloudflare.py +394 -395
- webscout/Provider/OPENAI/FalconH1.py +452 -457
- webscout/Provider/OPENAI/FreeGemini.py +297 -299
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
- webscout/Provider/OPENAI/NEMOTRON.py +241 -244
- webscout/Provider/OPENAI/PI.py +428 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +466 -467
- webscout/Provider/OPENAI/__init__.py +33 -59
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +249 -269
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +593 -588
- webscout/Provider/OPENAI/chatgptclone.py +521 -524
- webscout/Provider/OPENAI/chatsandbox.py +202 -177
- webscout/Provider/OPENAI/deepinfra.py +319 -315
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -311
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -354
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -529
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -348
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +244 -496
- webscout/Provider/PI.py +405 -404
- webscout/Provider/Perplexitylabs.py +430 -431
- webscout/Provider/QwenLM.py +272 -254
- webscout/Provider/STT/__init__.py +32 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -12
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -10
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -236
- webscout/Provider/TextPollinationsAI.py +311 -318
- webscout/Provider/TogetherAI.py +356 -357
- webscout/Provider/TwoAI.py +313 -569
- webscout/Provider/TypliAI.py +312 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
- webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +251 -250
- webscout/Provider/VercelAI.py +256 -255
- webscout/Provider/WiseCat.py +232 -231
- webscout/Provider/WrDoChat.py +367 -366
- webscout/Provider/__init__.py +33 -86
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -334
- webscout/Provider/cerebras.py +446 -340
- webscout/Provider/chatglm.py +394 -214
- webscout/Provider/cleeai.py +211 -212
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -265
- webscout/Provider/julius.py +223 -222
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +384 -457
- webscout/Provider/searchchat.py +292 -291
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -364
- webscout/Provider/turboseek.py +274 -265
- webscout/Provider/typefully.py +208 -207
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +372 -371
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/GptOss.py +0 -207
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Kimi.py +0 -445
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/Qwen3.py +0 -304
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/copilot.py +0 -321
- webscout/Provider/OPENAI/gptoss.py +0 -288
- webscout/Provider/OPENAI/kimi.py +0 -469
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -441
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- webscout/tempid.py +0 -128
- webscout-8.3.6.dist-info/RECORD +0 -327
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/PI.py
CHANGED
@@ -1,427 +1,428 @@
[The side-by-side rendering of this hunk was garbled in extraction. On the removed side, lines 1–292 were identical to the new file and lines 293–427 survive only as fragments; the added side is complete. The new file (428 lines) as shipped in 2025.10.11 is reconstructed below.]

from curl_cffi.requests import Session
from curl_cffi import CurlError
import json
import time
import uuid
import re
import threading
from typing import List, Dict, Optional, Union, Generator, Any
from uuid import uuid4

# Import base classes and utility structures
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
from webscout.Provider.OPENAI.utils import (
    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
    ChatCompletionMessage, CompletionUsage
)

# Attempt to import LitAgent, fallback if not available
try:
    from webscout.litagent import LitAgent
except ImportError:
    pass

# --- PI.ai Client ---

class Completions(BaseCompletions):
    def __init__(self, client: 'PiAI'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = 2048,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        voice: bool = False,
        voice_name: str = "voice3",
        output_file: str = "PiAI.mp3",
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create with Pi.ai specific features.
        """
        # Validate voice settings
        if voice and voice_name not in self._client.AVAILABLE_VOICES:
            raise ValueError(f"Voice '{voice_name}' not available. Choose from: {list(self._client.AVAILABLE_VOICES.keys())}")

        # Use format_prompt from utils.py to convert OpenAI messages format to Pi.ai prompt
        from webscout.Provider.OPENAI.utils import format_prompt, count_tokens
        prompt = format_prompt(messages, do_continue=True, add_special_tokens=True)

        # Ensure conversation is started
        if not self._client.conversation_id:
            self._client.start_conversation()

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        # Use count_tokens for prompt token counting
        prompt_tokens = count_tokens(prompt)

        if stream:
            return self._create_stream(
                request_id, created_time, model, prompt,
                timeout, proxies, voice, voice_name, output_file, prompt_tokens
            )
        else:
            return self._create_non_stream(
                request_id, created_time, model, prompt,
                timeout, proxies, voice, voice_name, output_file, prompt_tokens
            )

    def _create_stream(
        self, request_id: str, created_time: int, model: str, prompt: str,
        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None,
        voice: bool = False, voice_name: str = "voice3", output_file: str = "PiAI.mp3",
        prompt_tokens: Optional[int] = None
    ) -> Generator[ChatCompletionChunk, None, None]:

        data = {
            'text': prompt,
            'conversation': self._client.conversation_id
        }

        try:
            # Try primary URL first
            current_url = self._client.primary_url
            response = self._client.session.post(
                current_url,
                json=data,
                stream=True,
                timeout=timeout or self._client.timeout,
                impersonate="chrome110"
            )

            # If primary URL fails, try fallback URL
            if not response.ok and current_url == self._client.primary_url:
                current_url = self._client.fallback_url
                response = self._client.session.post(
                    current_url,
                    json=data,
                    stream=True,
                    timeout=timeout or self._client.timeout,
                    impersonate="chrome110"
                )

            response.raise_for_status()

            # Track token usage across chunks
            # prompt_tokens = len(prompt.split()) if prompt else 0
            completion_tokens = 0
            total_tokens = prompt_tokens

            sids = []
            streaming_text = ""
            full_raw_data_for_sids = ""

            # Process streaming response
            for line_bytes in response.iter_lines():
                if line_bytes:
                    line = line_bytes.decode('utf-8')
                    full_raw_data_for_sids += line + "\n"

                    if line.startswith("data: "):
                        json_line_str = line[6:]
                        try:
                            chunk_data = json.loads(json_line_str)
                            content = chunk_data.get('text', '')

                            if content:
                                # Calculate incremental content
                                new_content = content[len(streaming_text):] if len(content) > len(streaming_text) else content
                                streaming_text = content
                                completion_tokens += len(new_content.split()) if new_content else 0
                                total_tokens = prompt_tokens + completion_tokens

                                # Create OpenAI-compatible chunk
                                delta = ChoiceDelta(
                                    content=new_content,
                                    role="assistant"
                                )

                                choice = Choice(
                                    index=0,
                                    delta=delta,
                                    finish_reason=None
                                )

                                chunk = ChatCompletionChunk(
                                    id=request_id,
                                    choices=[choice],
                                    created=created_time,
                                    model=model
                                )

                                yield chunk

                        except (json.JSONDecodeError, KeyError):
                            continue

            # Send final chunk with finish_reason
            final_choice = Choice(
                index=0,
                delta=ChoiceDelta(),
                finish_reason="stop"
            )

            final_chunk = ChatCompletionChunk(
                id=request_id,
                choices=[final_choice],
                created=created_time,
                model=model
            )

            yield final_chunk

            # Handle voice generation
            if voice and voice_name:
                sids = re.findall(r'"sid":"(.*?)"', full_raw_data_for_sids)
                second_sid = sids[1] if len(sids) >= 2 else None
                if second_sid:
                    threading.Thread(
                        target=self._client.download_audio_threaded,
                        args=(voice_name, second_sid, output_file)
                    ).start()

        except CurlError as e:
            raise IOError(f"PI.ai request failed (CurlError): {e}") from e
        except Exception as e:
            raise IOError(f"PI.ai request failed: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, prompt: str,
        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None,
        voice: bool = False, voice_name: str = "voice3", output_file: str = "PiAI.mp3",
        prompt_tokens: Optional[int] = None
    ) -> ChatCompletion:

        # Collect streaming response into a single response
        full_content = ""
        completion_tokens = 0
        # prompt_tokens = len(prompt.split()) if prompt else 0  # replaced

        # Use provided prompt_tokens if available
        if prompt_tokens is None:
            from webscout.Provider.OPENAI.utils import count_tokens
            prompt_tokens = count_tokens(prompt)

        for chunk in self._create_stream(
            request_id, created_time, model, prompt,
            timeout, proxies, voice, voice_name, output_file, prompt_tokens
        ):
            if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
                full_content += chunk.choices[0].delta.content
                completion_tokens += len(chunk.choices[0].delta.content.split())

        # Create final completion response
        message = ChatCompletionMessage(
            role="assistant",
            content=full_content
        )

        choice = Choice(
            index=0,
            message=message,
            finish_reason="stop"
        )

        usage = CompletionUsage(
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            total_tokens=prompt_tokens + completion_tokens
        )

        completion = ChatCompletion(
            id=request_id,
            choices=[choice],
            created=created_time,
            model=model,
            usage=usage
        )

        return completion


class Chat(BaseChat):
    def __init__(self, client: 'PiAI'):
        self.completions = Completions(client)


class PiAI(OpenAICompatibleProvider):
    """
    PiAI provider following OpenAI-compatible interface.

    Supports Pi.ai specific features like voice generation and conversation management.
    """

    AVAILABLE_MODELS = ["inflection_3_pi"]
    AVAILABLE_VOICES: Dict[str, int] = {
        "voice1": 1,
        "voice2": 2,
        "voice3": 3,
        "voice4": 4,
        "voice5": 5,
        "voice6": 6,
        "voice7": 7,
        "voice8": 8
    }

    def __init__(
        self,
        api_key: Optional[str] = None,
        timeout: int = 30,
        proxies: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ):
        """
        Initialize PI.ai provider.

        Args:
            api_key: Not used for Pi.ai but kept for compatibility
            timeout: Request timeout in seconds
            proxies: Proxy configuration
            **kwargs: Additional arguments
        """
        super().__init__(proxies=proxies)
        self.timeout = timeout
        self.conversation_id = None

        # Setup URLs
        self.primary_url = 'https://pi.ai/api/chat'
        self.fallback_url = 'https://pi.ai/api/v2/chat'

        # Setup headers
        self.headers = {
            'Accept': 'text/event-stream',
            'Accept-Encoding': 'gzip, deflate, br, zstd',
            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
            'Content-Type': 'application/json',
            'DNT': '1',
            'Origin': 'https://pi.ai',
            'Referer': 'https://pi.ai/talk',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'User-Agent': LitAgent().random() if 'LitAgent' in globals() else 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
            'X-Api-Version': '3'
        }

        # Setup cookies
        self.cookies = {
            '__cf_bm': uuid4().hex
        }

        # Replace the base session with curl_cffi Session
        self.session = Session()

        # Configure session
        self.session.headers.update(self.headers)
        if proxies:
            self.session.proxies = proxies

        # Set cookies on the session
        for name, value in self.cookies.items():
            self.session.cookies.set(name, value)

        # Initialize chat interface
        self.chat = Chat(self)

        # Start conversation
        self.start_conversation()

    def start_conversation(self) -> str:
        """
        Initializes a new conversation and returns the conversation ID.
        """
        try:
            response = self.session.post(
                "https://pi.ai/api/chat/start",
                json={},
                timeout=self.timeout,
                impersonate="chrome110"
            )
            response.raise_for_status()

            data = response.json()
            if 'conversations' in data and data['conversations'] and 'sid' in data['conversations'][0]:
                self.conversation_id = data['conversations'][0]['sid']
                return self.conversation_id
            else:
                raise IOError(f"Unexpected response structure from start API: {data}")

        except CurlError as e:
            raise IOError(f"Failed to start conversation (CurlError): {e}") from e
        except Exception as e:
            raise IOError(f"Failed to start conversation: {e}") from e

    def download_audio_threaded(self, voice_name: str, second_sid: str, output_file: str) -> None:
        """Downloads audio in a separate thread."""
        params = {
            'mode': 'eager',
            'voice': f'voice{self.AVAILABLE_VOICES[voice_name]}',
            'messageSid': second_sid,
        }

        try:
            audio_response = self.session.get(
                'https://pi.ai/api/chat/voice',
                params=params,
                timeout=self.timeout,
                impersonate="chrome110"
            )
            audio_response.raise_for_status()

            with open(output_file, "wb") as file:
                file.write(audio_response.content)

        except (CurlError, Exception):
            # Optionally log the error
            pass

    @property
    def models(self):
        """Return available models in OpenAI-compatible format."""
        class _ModelList:
            def list(inner_self):
                return PiAI.AVAILABLE_MODELS
        return _ModelList()


# Example usage
if __name__ == "__main__":
    # Test the OpenAI-compatible interface
    client = PiAI()

    # Test streaming
    print("Testing streaming response:")
    response = client.chat.completions.create(
        model="inflection_3_pi",
        messages=[
            {"role": "user", "content": "Hello! Say 'Hi' in one word."}
        ],
        stream=True
    )

    for chunk in response:
        if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="", flush=True)
    print()

    # Test non-streaming
    print("\nTesting non-streaming response:")
    response = client.chat.completions.create(
        model="inflection_3_pi",
        messages=[
            {"role": "user", "content": "Tell me a short joke."}
        ],
        stream=False
    )

    print(response.choices[0].message.content)
    print(f"Usage: {response.usage}")