webscout 8.3.6__py3-none-any.whl → 2025.10.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout has been flagged as potentially problematic; see the registry's advisory page for details.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -58
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +33 -11
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -314
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -315
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -342
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -207
- webscout/Provider/Deepinfra.py +370 -369
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -387
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -162
- webscout/Provider/GeminiProxy.py +167 -166
- webscout/Provider/GithubChat.py +371 -370
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -379
- webscout/Provider/Jadve.py +282 -297
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +206 -384
- webscout/Provider/LambdaChat.py +423 -425
- webscout/Provider/Nemotron.py +244 -245
- webscout/Provider/Netwrck.py +248 -247
- webscout/Provider/OLLAMA.py +395 -394
- webscout/Provider/OPENAI/Cloudflare.py +394 -395
- webscout/Provider/OPENAI/FalconH1.py +452 -457
- webscout/Provider/OPENAI/FreeGemini.py +297 -299
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
- webscout/Provider/OPENAI/NEMOTRON.py +241 -244
- webscout/Provider/OPENAI/PI.py +428 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +466 -467
- webscout/Provider/OPENAI/__init__.py +33 -59
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +249 -269
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +593 -588
- webscout/Provider/OPENAI/chatgptclone.py +521 -524
- webscout/Provider/OPENAI/chatsandbox.py +202 -177
- webscout/Provider/OPENAI/deepinfra.py +319 -315
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -311
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -354
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -529
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -348
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +244 -496
- webscout/Provider/PI.py +405 -404
- webscout/Provider/Perplexitylabs.py +430 -431
- webscout/Provider/QwenLM.py +272 -254
- webscout/Provider/STT/__init__.py +32 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -12
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -10
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -236
- webscout/Provider/TextPollinationsAI.py +311 -318
- webscout/Provider/TogetherAI.py +356 -357
- webscout/Provider/TwoAI.py +313 -569
- webscout/Provider/TypliAI.py +312 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
- webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +251 -250
- webscout/Provider/VercelAI.py +256 -255
- webscout/Provider/WiseCat.py +232 -231
- webscout/Provider/WrDoChat.py +367 -366
- webscout/Provider/__init__.py +33 -86
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -334
- webscout/Provider/cerebras.py +446 -340
- webscout/Provider/chatglm.py +394 -214
- webscout/Provider/cleeai.py +211 -212
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -265
- webscout/Provider/julius.py +223 -222
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +384 -457
- webscout/Provider/searchchat.py +292 -291
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -364
- webscout/Provider/turboseek.py +274 -265
- webscout/Provider/typefully.py +208 -207
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +372 -371
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/GptOss.py +0 -207
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Kimi.py +0 -445
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/Qwen3.py +0 -304
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/copilot.py +0 -321
- webscout/Provider/OPENAI/gptoss.py +0 -288
- webscout/Provider/OPENAI/kimi.py +0 -469
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -441
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- webscout/tempid.py +0 -128
- webscout-8.3.6.dist-info/RECORD +0 -327
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
|
@@ -1,309 +1,309 @@
|
|
|
1
|
-
from curl_cffi.requests import Session
|
|
2
|
-
import uuid
|
|
3
|
-
import re
|
|
4
|
-
from typing import Any, Dict, Generator, Optional, Union
|
|
5
|
-
from webscout.AIutel import Optimizers
|
|
6
|
-
from webscout.AIutel import Conversation
|
|
7
|
-
from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
|
|
8
|
-
from webscout.AIbase import Provider
|
|
9
|
-
from webscout import exceptions
|
|
10
|
-
from webscout.litagent import LitAgent
|
|
11
|
-
|
|
12
|
-
class StandardInputAI(Provider):
    """
    A class to interact with the Standard Input chat API.

    Sends chat requests to https://chat.standard-input.com/api/chat over a
    curl_cffi Session (with Chrome impersonation) and streams back the reply,
    using a LitAgent-generated browser fingerprint for the request headers.
    """

    # Maps the public model alias accepted by this class to the API's
    # internal "modelId" value sent in the request payload.
    AVAILABLE_MODELS = {
        "standard-quick": "quick",
        "standard-reasoning": "quick", # Same model but with reasoning enabled
    }

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},  # NOTE(review): mutable default argument — the same dict is shared across instances; a None default would be safer
        history_offset: int = 10250,
        act: str = None,
        model: str = "standard-quick",
        chat_id: str = None,
        user_id: str = None,
        browser: str = "chrome",
        system_prompt: str = "You are a helpful assistant.",
        enable_reasoning: bool = False,
    ):
        """
        Initializes the Standard Input API client.

        Args:
            is_conversation (bool): Whether to maintain conversation history.
            max_tokens (int): Maximum number of tokens to generate.
            timeout (int): Request timeout in seconds.
            intro (str): Introduction text for the conversation.
            filepath (str): Path to save conversation history.
            update_file (bool): Whether to update the conversation history file.
            proxies (dict): Proxy configuration for requests.
            history_offset (int): Maximum history length in characters.
            act (str): Persona for the AI to adopt.
            model (str): Model to use, must be one of AVAILABLE_MODELS.
            chat_id (str): Unique identifier for the chat session.
            user_id (str): Unique identifier for the user.
            browser (str): Browser to emulate in requests.
            system_prompt (str): System prompt for the AI.
            enable_reasoning (bool): Whether to enable reasoning feature.

        Raises:
            ValueError: If ``model`` is not a key of AVAILABLE_MODELS.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.url = "https://chat.standard-input.com/api/chat"

        # Initialize LitAgent for user agent generation
        self.agent = LitAgent()
        # Use fingerprinting to create a consistent browser identity
        self.fingerprint = self.agent.generate_fingerprint(browser)
        self.system_prompt = system_prompt

        # Use the fingerprint for headers; the sec-ch-ua fallback covers
        # fingerprints whose "sec_ch_ua" field is empty/falsy.
        self.headers = {
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": self.fingerprint["accept_language"],
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://chat.standard-input.com",
            "referer": "https://chat.standard-input.com/",
            "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "user-agent": self.fingerprint["user_agent"],
        }

        # Default cookies - these should be updated for production use
        # NOTE(review): hard-coded auth cookies embed JWTs with fixed "exp"
        # claims — these will expire and requests will then start failing.
        self.cookies = {
            "auth-chat": '''%7B%22user%22%3A%7B%22id%22%3A%2243a26ebd-7691-4a5a-8321-12aff017af86%22%2C%22email%22%3A%22iu511inmev%40illubd.com%22%2C%22accountId%22%3A%22057d78c9-06db-48eb-aeaa-0efdbaeb9446%22%2C%22provider%22%3A%22password%22%7D%2C%22tokens%22%3A%7B%22access%22%3A%22eyJhbGciOiJFUzI1NiIsImtpZCI6Ijg1NDhmZWY1LTk5MjYtNDk2Yi1hMjI2LTQ5OTExYjllYzU2NSIsInR5cCI6IkpXVCJ9.eyJtb2RlIjoiYWNjZXNzIiwidHlwZSI6InVzZXIiLCJwcm9wZXJ0aWVzIjp7ImlkIjoiNDNhMjZlYmQtNzY5MS00YTVhLTgzMzEtMTJhZmYwMTdhZjg2IiwiZW1haWwiOiJpdTUxMWlubWV2QGlsbHViZC5jb20iLCJhY2NvdW50SWQiOiIwNTdkNzhjOS0wNmRiLTQ4ZWItYWVhYS0wZWZkYmFlYjk0NDYiLCJwcm92aWRlciI6InBhc3N3b3JkIn0sImF1ZCI6InN0YW5kYXJkLWlucHV0LWlvcyIsImlzcyI6Imh0dHBzOi8vYXV0aC5zdGFuZGFyZC1pbnB1dC5jb20iLCJzdWIiOiJ1c2VyOjRmYWMzMTllZjA4MDRiZmMiLCJleHAiOjE3NDU0MDU5MDN9.d3VsEq-UCNsQWkiPlTVw7caS0wTXfCYe6yeFLeb4Ce6ZYTIFFn685SF-aKvLOxaYaq7Pyk4D2qr24riPVhxUWQ%22%2C%22refresh%22%3A%22user%3A4fac319ef0804bfc%3A3a757177-5507-4a36-9356-492f5ed06105%22%7D%7D''',
            "auth": '''%7B%22user%22%3A%7B%22id%22%3A%22c51e291f-8f44-439d-a38b-9ea147581a13%22%2C%22email%22%3A%22r6cigexlsb%40mrotzis.com%22%2C%22accountId%22%3A%22599fd4ce-04a2-40f6-a78f-d33d0059b77f%22%2C%22provider%22%3A%22password%22%7D%2C%22tokens%22%3A%7B%22access%22%3A%22eyJhbGciOiJFUzI1NiIsImtpZCI6Ijg1NDhmZWY1LTk5MjYtNDk2Yi1hMjI2LTQ5OTExYjllYzU2NSIsInR5cCI6IkpXVCJ9.eyJtb2RlIjoiYWNjZXNzIiwidHlwZSI6InVzZXIiLCJwcm9wZXJ0aWVzIjp7ImlkIjoiYzUxZTI5MWYtOGY0NC00MzlkLWEzOGItOWVhMTQ3NTgxYTEzIiwiZW1haWwiOiJyNmNpZ2V4bHNiQG1yb3R6aXMuY29tIiwiYWNjb3VudElkIjoiNTk5ZmQ0Y2UtMDRhMi00MGY2LWE3OGYtZDMzZDAwNTliNzdmIiwicHJvdmlkZXIiOiJwYXNzd29yZCJ9LCJhdWQiOiJzdGFuZGFyZC1pbnB1dC1pb3MiLCJpc3MiOiJodHRwczovL2F1dGguc3RhbmRhcmQtaW5wdXQuY29tIiwic3ViIjoidXNlcjo4Y2FmMjRkYzUxNDc4MmNkIiwiZXhwIjoxNzQ2NzI0MTU3fQ.a3970nBJkd8JoU-khRA2JlRMuYeJ7378QS4ZL446kOkDi35uTwuC4qGrWH9efk9GkFaVcWPtYeOJjRb7f2SeJA%22%2C%22refresh%22%3A%22user%3A8caf24dc514782cd%3A14e24386-8443-4df0-ae25-234ad59218ef%22%7D%7D''',
            "sidebar:state": "true",
            "ph_phc_f3wUUyCfmKlKtkc2pfT7OsdcW2mBEVGN2A87yEYbG3c_posthog": '''%7B%22distinct_id%22%3A%220195c7cc-ac8f-79ff-b901-e14a78fc2a67%22%2C%22%24sesid%22%3A%5B1744688627860%2C%220196377f-9f12-77e6-a9ea-0e9669423803%22%2C1744687832850%5D%2C%22%24initial_person_info%22%3A%7B%22r%22%3A%22%24direct%22%2C%22u%22%3A%22https%3A%2F%2Fstandard-input.com%2F%22%7D%7D'''
        }

        self.session = Session() # Use curl_cffi Session
        self.session.headers.update(self.headers)
        self.session.proxies = proxies # Assign proxies directly

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}  # holds {"text": ...} of the most recent reply
        self.model = model
        self.chat_id = chat_id or str(uuid.uuid4())
        self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"
        self.enable_reasoning = enable_reasoning

        # NOTE(review): this is a one-shot generator expression — the
        # membership test in ask() exhausts it on first use, so a second
        # call with an optimizer would always fail; a tuple/set would be safer.
        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        # Resolve the conversation intro: a named persona ("act") takes
        # precedence, then the explicit intro, then the class default.
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def refresh_identity(self, browser: str = None):
        """
        Refreshes the browser identity fingerprint.

        Args:
            browser: Specific browser to use for the new fingerprint

        Returns:
            The newly generated fingerprint dict.
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Update headers with new fingerprint
        # NOTE(review): these keys are capitalized ("Accept-Language") while
        # __init__ used lowercase ("accept-language") — after a refresh both
        # variants coexist in self.headers; confirm the server tolerates this.
        self.headers.update({
            "Accept-Language": self.fingerprint["accept_language"],
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["sec-ch-ua"],
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        })

        # Update session headers
        for header, value in self.headers.items():
            self.session.headers[header] = value

        return self.fingerprint

    @staticmethod
    def _standardinput_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extracts content from the StandardInput stream format '0:"..."'.

        Returns the decoded text fragment, or None when the chunk is not a
        string or carries no '0:"..."' payload.
        """
        if isinstance(chunk, str):
            match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
            if match:
                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
                content = match.group(1).encode().decode('unicode_escape')
                return content.replace('\\\\', '\\').replace('\\"', '"')
        return None

    def ask(
        self,
        prompt: str,
        optimizer: str = None,
        conversationally: bool = False,
        raw: bool = False, # Added raw parameter
    ) -> Union[Dict[str, Any], Generator[str, None, None]]:
        """Send *prompt* to the API and stream/collect the reply.

        NOTE(review): because this body contains ``yield``, calling ask()
        ALWAYS returns a generator object — the ``return`` statements below
        only set the generator's StopIteration value. With raw=False nothing
        is ever yielded; callers must rely on chat() or consume the generator
        to trigger the request. Confirm this matches Provider's contract.

        Raises:
            exceptions.FailedToGenerateResponseError: on any request failure.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Prepare the messages
        messages = [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": conversation_prompt, "parts": [{"type": "text", "text": conversation_prompt}]}
        ]

        # Prepare the request payload
        payload = {
            "id": self.chat_id,
            "messages": messages,
            "modelId": self.AVAILABLE_MODELS[self.model],
            "enabledFeatures": ["reasoning"] if self.enable_reasoning or self.model == "standard-reasoning" else []
        }

        try:
            # Use curl_cffi post with impersonate
            response = self.session.post(
                self.url,
                cookies=self.cookies,
                json=payload,
                stream=True,
                timeout=self.timeout,
                impersonate="chrome120" # Add impersonate
            )

            if response.status_code != 200:
                try:
                    error_content = response.text
                except:  # NOTE(review): bare except — intentionally best-effort read of the error body
                    error_content = "<could not read response content>"

                # 403/429 suggest the fingerprint was flagged or rate-limited:
                # rotate identity once and retry before giving up.
                if response.status_code in [403, 429]:
                    self.refresh_identity()
                    response = self.session.post(
                        self.url, cookies=self.cookies, json=payload, stream=True,
                        timeout=self.timeout, impersonate="chrome120"
                    )
                    if not response.ok:
                        raise exceptions.FailedToGenerateResponseError(
                            f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}"
                        )
                else:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Request failed with status code {response.status_code}. Response: {error_content}"
                    )

            full_response = ""
            # Use sanitize_stream
            processed_stream = sanitize_stream(
                data=response.iter_content(chunk_size=None), # Pass byte iterator
                intro_value=None, # No simple prefix
                to_json=False, # Content is not JSON
                content_extractor=self._standardinput_extractor # Use the specific extractor
            )
            for content_chunk in processed_stream:
                if content_chunk and isinstance(content_chunk, str):
                    full_response += content_chunk
                    if raw:
                        yield content_chunk
            self.last_response = {"text": full_response}
            self.conversation.update_chat_history(prompt, full_response)
            if raw:
                return full_response
            return {"text": full_response}
        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

    def chat(
        self,
        prompt: str,
        optimizer: str = None,
        conversationally: bool = False,
        raw: bool = False, # Added raw parameter
    ) -> Union[str, Generator[str, None, None]]:
        """Convenience wrapper around ask().

        With raw=True a generator of text chunks is returned; otherwise the
        full reply is returned as a single value via for_non_stream().

        NOTE(review): ask() always returns a generator (see its docstring),
        so with raw=False for_non_stream() hands an unconsumed generator to
        self.get_message() — verify Provider.get_message copes with that.
        """
        def for_stream():
            # Stream chunks as they arrive; falls back to a single yield when
            # ask() returned a non-iterable result.
            gen = self.ask(
                prompt, optimizer=optimizer, conversationally=conversationally, raw=raw
            )
            if hasattr(gen, '__iter__') and not isinstance(gen, dict):
                for chunk in gen:
                    if raw:
                        yield chunk
                    else:
                        yield self.get_message({"text": chunk})
            else:
                if raw:
                    yield gen if isinstance(gen, str) else self.get_message(gen)
                else:
                    yield self.get_message(gen)
        def for_non_stream():
            # Collect the whole reply in one call.
            result = self.ask(
                prompt, optimizer=optimizer, conversationally=conversationally, raw=raw
            )
            if raw:
                return result if isinstance(result, str) else self.get_message(result)
            else:
                return self.get_message(result)
        return for_stream() if raw else for_non_stream()
|
|
278
|
-
|
|
279
|
-
if __name__ == "__main__":
    # Smoke-test: try every advertised model once and print a one-line
    # pass/fail report per model.
    separator = "-" * 100
    print(separator)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print(separator)

    test_prompt = "Say 'Hello' in one word"

    for model in StandardInputAI.AVAILABLE_MODELS:
        print(f"\rTesting {model}...", end="")

        try:
            client = StandardInputAI(model=model, timeout=120)  # Increased timeout
            reply = client.chat(test_prompt)

            stripped = reply.strip() if reply else ""
            if stripped:
                status = "✓"
                # Drop undecodable characters and truncate for display.
                clean_text = stripped.encode('utf-8', errors='ignore').decode('utf-8')
                display_text = clean_text if len(clean_text) <= 50 else clean_text[:50] + "..."
            else:
                status = "✗"
                display_text = "Empty or invalid response"

            print(f"\r{model:<50} {status:<10} {display_text}")
        except Exception as exc:
            error_msg = str(exc)
            # Keep error lines readable by truncating long messages.
            if len(error_msg) > 100:
                error_msg = error_msg[:97] + "..."
            print(f"\r{model:<50} {'✗':<10} Error: {error_msg}")
|
|
1
|
+
from curl_cffi.requests import Session
|
|
2
|
+
import uuid
|
|
3
|
+
import re
|
|
4
|
+
from typing import Any, Dict, Generator, Optional, Union
|
|
5
|
+
from webscout.AIutel import Optimizers
|
|
6
|
+
from webscout.AIutel import Conversation
|
|
7
|
+
from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
|
|
8
|
+
from webscout.AIbase import Provider
|
|
9
|
+
from webscout import exceptions
|
|
10
|
+
from webscout.litagent import LitAgent
|
|
11
|
+
|
|
12
|
+
class StandardInputAI(Provider):
    """
    Provider for the Standard Input chat API (https://chat.standard-input.com).

    Emulates a real browser via a LitAgent fingerprint plus curl_cffi
    impersonation, and parses the site's streaming '0:"..."' chunk format.
    """

    # The hard-coded cookies below carry the authentication; callers do not
    # need to supply any credentials of their own.
    required_auth = False

    # Public model aliases -> backend model ids. Both aliases resolve to the
    # same backend model; "standard-reasoning" only switches reasoning on.
    AVAILABLE_MODELS = {
        "standard-quick": "quick",
        "standard-reasoning": "quick",  # Same model but with reasoning enabled
    }

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "standard-quick",
        chat_id: str = None,
        user_id: str = None,
        browser: str = "chrome",
        system_prompt: str = "You are a helpful assistant.",
        enable_reasoning: bool = False,
    ):
        """
        Initializes the Standard Input API client.

        Args:
            is_conversation (bool): Whether to maintain conversation history.
            max_tokens (int): Maximum number of tokens to generate.
            timeout (int): Request timeout in seconds.
            intro (str): Introduction text for the conversation.
            filepath (str): Path to save conversation history.
            update_file (bool): Whether to update the conversation history file.
            proxies (dict): Proxy configuration for requests. ``None`` (the
                default) means no proxies; a mutable ``{}`` default is avoided
                deliberately so instances never share state.
            history_offset (int): Maximum history length in characters.
            act (str): Persona for the AI to adopt.
            model (str): Model to use, must be one of AVAILABLE_MODELS.
            chat_id (str): Unique identifier for the chat session.
            user_id (str): Unique identifier for the user.
            browser (str): Browser to emulate in requests.
            system_prompt (str): System prompt for the AI.
            enable_reasoning (bool): Whether to enable reasoning feature.

        Raises:
            ValueError: If ``model`` is not a key of AVAILABLE_MODELS.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.url = "https://chat.standard-input.com/api/chat"

        # Initialize LitAgent and build a consistent browser identity
        # (fingerprint) that all requests will share.
        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint(browser)
        self.system_prompt = system_prompt

        # Derive the request headers from the fingerprint.
        self.headers = {
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": self.fingerprint["accept_language"],
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://chat.standard-input.com",
            "referer": "https://chat.standard-input.com/",
            "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "user-agent": self.fingerprint["user_agent"],
        }

        # Default cookies - these should be updated for production use.
        self.cookies = {
            "auth-chat": '''%7B%22user%22%3A%7B%22id%22%3A%2243a26ebd-7691-4a5a-8321-12aff017af86%22%2C%22email%22%3A%22iu511inmev%40illubd.com%22%2C%22accountId%22%3A%22057d78c9-06db-48eb-aeaa-0efdbaeb9446%22%2C%22provider%22%3A%22password%22%7D%2C%22tokens%22%3A%7B%22access%22%3A%22eyJhbGciOiJFUzI1NiIsImtpZCI6Ijg1NDhmZWY1LTk5MjYtNDk2Yi1hMjI2LTQ5OTExYjllYzU2NSIsInR5cCI6IkpXVCJ9.eyJtb2RlIjoiYWNjZXNzIiwidHlwZSI6InVzZXIiLCJwcm9wZXJ0aWVzIjp7ImlkIjoiNDNhMjZlYmQtNzY5MS00YTVhLTgzMzEtMTJhZmYwMTdhZjg2IiwiZW1haWwiOiJpdTUxMWlubWV2QGlsbHViZC5jb20iLCJhY2NvdW50SWQiOiIwNTdkNzhjOS0wNmRiLTQ4ZWItYWVhYS0wZWZkYmFlYjk0NDYiLCJwcm92aWRlciI6InBhc3N3b3JkIn0sImF1ZCI6InN0YW5kYXJkLWlucHV0LWlvcyIsImlzcyI6Imh0dHBzOi8vYXV0aC5zdGFuZGFyZC1pbnB1dC5jb20iLCJzdWIiOiJ1c2VyOjRmYWMzMTllZjA4MDRiZmMiLCJleHAiOjE3NDU0MDU5MDN9.d3VsEq-UCNsQWkiPlTVw7caS0wTXfCYe6yeFLeb4Ce6ZYTIFFn685SF-aKvLOxaYaq7Pyk4D2qr24riPVhxUWQ%22%2C%22refresh%22%3A%22user%3A4fac319ef0804bfc%3A3a757177-5507-4a36-9356-492f5ed06105%22%7D%7D''',
            "auth": '''%7B%22user%22%3A%7B%22id%22%3A%22c51e291f-8f44-439d-a38b-9ea147581a13%22%2C%22email%22%3A%22r6cigexlsb%40mrotzis.com%22%2C%22accountId%22%3A%22599fd4ce-04a2-40f6-a78f-d33d0059b77f%22%2C%22provider%22%3A%22password%22%7D%2C%22tokens%22%3A%7B%22access%22%3A%22eyJhbGciOiJFUzI1NiIsImtpZCI6Ijg1NDhmZWY1LTk5MjYtNDk2Yi1hMjI2LTQ5OTExYjllYzU2NSIsInR5cCI6IkpXVCJ9.eyJtb2RlIjoiYWNjZXNzIiwidHlwZSI6InVzZXIiLCJwcm9wZXJ0aWVzIjp7ImlkIjoiYzUxZTI5MWYtOGY0NC00MzlkLWEzOGItOWVhMTQ3NTgxYTEzIiwiZW1haWwiOiJyNmNpZ2V4bHNiQG1yb3R6aXMuY29tIiwiYWNjb3VudElkIjoiNTk5ZmQ0Y2UtMDRhMi00MGY2LWE3OGYtZDMzZDAwNTliNzdmIiwicHJvdmlkZXIiOiJwYXNzd29yZCJ9LCJhdWQiOiJzdGFuZGFyZC1pbnB1dC1pb3MiLCJpc3MiOiJodHRwczovL2F1dGguc3RhbmRhcmQtaW5wdXQuY29tIiwic3ViIjoidXNlcjo4Y2FmMjRkYzUxNDc4MmNkIiwiZXhwIjoxNzQ2NzI0MTU3fQ.a3970nBJkd8JoU-khRA2JlRMuYeJ7378QS4ZL446kOkDi35uTwuC4qGrWH9efk9GkFaVcWPtYeOJjRb7f2SeJA%22%2C%22refresh%22%3A%22user%3A8caf24dc514782cd%3A14e24386-8443-4df0-ae25-234ad59218ef%22%7D%7D''',
            "sidebar:state": "true",
            "ph_phc_f3wUUyCfmKlKtkc2pfT7OsdcW2mBEVGN2A87yEYbG3c_posthog": '''%7B%22distinct_id%22%3A%220195c7cc-ac8f-79ff-b901-e14a78fc2a67%22%2C%22%24sesid%22%3A%5B1744688627860%2C%220196377f-9f12-77e6-a9ea-0e9669423803%22%2C1744687832850%5D%2C%22%24initial_person_info%22%3A%7B%22r%22%3A%22%24direct%22%2C%22u%22%3A%22https%3A%2F%2Fstandard-input.com%2F%22%7D%7D'''
        }

        self.session = Session()  # curl_cffi session (supports impersonation)
        self.session.headers.update(self.headers)
        # Mutable-default fix: only use caller-supplied proxies; fall back to a
        # fresh dict so instances never share proxy state.
        self.session.proxies = proxies if proxies is not None else {}

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.chat_id = chat_id or str(uuid.uuid4())
        self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"
        self.enable_reasoning = enable_reasoning

        # Materialized as a tuple: the previous generator expression was
        # exhausted by the first membership test, so a second ask(optimizer=...)
        # call would always fail.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def refresh_identity(self, browser: str = None):
        """
        Refreshes the browser identity fingerprint.

        Args:
            browser: Specific browser to use for the new fingerprint.

        Returns:
            The newly generated fingerprint dict.
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Update the existing lowercase header keys in place. (Previously this
        # inserted capitalized duplicates - "Accept-Language" etc. - leaving
        # the stale lowercase values in the dict as well.)
        self.headers.update({
            "accept-language": self.fingerprint["accept_language"],
            "sec-ch-ua": self.fingerprint["sec_ch_ua"] or self.headers["sec-ch-ua"],
            "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
            "user-agent": self.fingerprint["user_agent"],
        })

        # Propagate every header to the live session.
        for header, value in self.headers.items():
            self.session.headers[header] = value

        return self.fingerprint

    @staticmethod
    def _standardinput_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extracts content from the StandardInput stream format '0:"..."'."""
        if isinstance(chunk, str):
            # Look for 0:"...", possibly followed by a comma or end of string.
            match = re.search(r'0:"(.*?)"(?=,|$)', chunk)
            if match:
                # Decode potential unicode escapes like \u00e9 and handle
                # escaped quotes/backslashes.
                content = match.group(1).encode().decode('unicode_escape')
                return content.replace('\\\\', '\\').replace('\\"', '"')
        return None

    def ask(
        self,
        prompt: str,
        optimizer: str = None,
        conversationally: bool = False,
        raw: bool = False,
    ) -> Union[Dict[str, Any], Generator[str, None, None]]:
        """
        Send ``prompt`` to the API.

        Args:
            prompt: The user prompt.
            optimizer: Optional name of an ``Optimizers`` method to preprocess
                the prompt.
            conversationally: Apply the optimizer to the full conversation
                prompt instead of the bare prompt.
            raw: If True, return a generator yielding text chunks as they
                stream in; otherwise block until complete and return
                ``{"text": <full response>}``.

        Returns:
            A dict ``{"text": ...}`` when ``raw`` is False, else a generator
            of text chunks. (The previous implementation contained a ``yield``
            on every path, so it ALWAYS returned a generator and its ``return``
            values were silently discarded, breaking non-streaming callers.)

        Raises:
            Exception: If ``optimizer`` is not a known optimizer.
            exceptions.FailedToGenerateResponseError: On any request failure.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Prepare the messages.
        messages = [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": conversation_prompt, "parts": [{"type": "text", "text": conversation_prompt}]}
        ]

        # Prepare the request payload.
        payload = {
            "id": self.chat_id,
            "messages": messages,
            "modelId": self.AVAILABLE_MODELS[self.model],
            "enabledFeatures": ["reasoning"] if self.enable_reasoning or self.model == "standard-reasoning" else []
        }

        def _stream() -> Generator[str, None, None]:
            """Yield decoded text chunks; record history once the stream ends."""
            try:
                # Use curl_cffi post with impersonation.
                response = self.session.post(
                    self.url,
                    cookies=self.cookies,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome120",
                )

                if response.status_code != 200:
                    try:
                        error_content = response.text
                    except Exception:  # narrow from bare except: keep best-effort read
                        error_content = "<could not read response content>"

                    # 403/429 usually mean we were fingerprinted or rate
                    # limited: refresh the identity once and retry.
                    if response.status_code in [403, 429]:
                        self.refresh_identity()
                        response = self.session.post(
                            self.url, cookies=self.cookies, json=payload, stream=True,
                            timeout=self.timeout, impersonate="chrome120"
                        )
                        if not response.ok:
                            raise exceptions.FailedToGenerateResponseError(
                                f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}"
                            )
                    else:
                        raise exceptions.FailedToGenerateResponseError(
                            f"Request failed with status code {response.status_code}. Response: {error_content}"
                        )

                full_response = ""
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),  # byte iterator
                    intro_value=None,  # no simple prefix to strip
                    to_json=False,  # chunks are not JSON
                    content_extractor=self._standardinput_extractor,
                )
                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        full_response += content_chunk
                        yield content_chunk

                self.last_response = {"text": full_response}
                self.conversation.update_chat_history(prompt, full_response)
            except exceptions.FailedToGenerateResponseError:
                raise  # already descriptive; do not re-wrap
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        if raw:
            return _stream()

        # Non-raw: drain the stream and hand back the aggregated payload.
        for _ in _stream():
            pass
        return dict(self.last_response)

    def chat(
        self,
        prompt: str,
        optimizer: str = None,
        conversationally: bool = False,
        raw: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """
        High-level chat call.

        Args:
            prompt: The user prompt.
            optimizer: Optional ``Optimizers`` method name.
            conversationally: Apply the optimizer conversationally.
            raw: If True, return a generator of raw text chunks.

        Returns:
            A generator of text chunks when ``raw`` is True, otherwise the
            complete response text as a string.
        """
        if raw:
            # Stream chunks straight through from ask().
            def for_stream():
                for chunk in self.ask(
                    prompt, optimizer=optimizer, conversationally=conversationally, raw=True
                ):
                    yield chunk
            return for_stream()

        # Non-streaming: ask() now returns {"text": ...}, which get_message
        # can unwrap (previously it received an unconsumed generator here).
        result = self.ask(
            prompt, optimizer=optimizer, conversationally=conversationally, raw=False
        )
        return self.get_message(result)
|
|
278
|
+
|
|
279
|
+
if __name__ == "__main__":
    # Smoke test: exercise every advertised model once and print a result row.
    rule = "-" * 100
    print(rule)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print(rule)

    test_prompt = "Say 'Hello' in one word"

    for model_name in StandardInputAI.AVAILABLE_MODELS:
        print(f"\rTesting {model_name}...", end="")

        try:
            client = StandardInputAI(model=model_name, timeout=120)  # Increased timeout
            reply = client.chat(test_prompt)

            if reply and len(reply.strip()) > 0:
                mark = "✓"
                # Clean and truncate the reply for single-line display.
                cleaned = reply.strip().encode('utf-8', errors='ignore').decode('utf-8')
                shown = cleaned[:50] + "..." if len(cleaned) > 50 else cleaned
            else:
                mark = "✗"
                shown = "Empty or invalid response"

            print(f"\r{model_name:<50} {mark:<10} {shown}")
        except Exception as exc:
            message = str(exc)
            # Keep very long error messages to one line.
            if len(message) > 100:
                message = message[:97] + "..."
            print(f"\r{model_name:<50} {'✗':<10} Error: {message}")