webscout 8.2.7__py3-none-any.whl → 8.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +33 -15
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +703 -250
- webscout/Bard.py +441 -323
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +15 -0
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +333 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +202 -0
- webscout/Provider/AISEARCH/genspark_search.py +324 -0
- webscout/Provider/AISEARCH/hika_search.py +186 -0
- webscout/Provider/AISEARCH/iask_search.py +410 -0
- webscout/Provider/AISEARCH/monica_search.py +220 -0
- webscout/Provider/AISEARCH/scira_search.py +298 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +791 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +369 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +375 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +283 -0
- webscout/Provider/OPENAI/NEMOTRON.py +232 -0
- webscout/Provider/OPENAI/Qwen3.py +283 -0
- webscout/Provider/OPENAI/README.md +952 -0
- webscout/Provider/OPENAI/TwoAI.py +357 -0
- webscout/Provider/OPENAI/__init__.py +40 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +969 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +494 -0
- webscout/Provider/OPENAI/chatsandbox.py +173 -0
- webscout/Provider/OPENAI/copilot.py +242 -0
- webscout/Provider/OPENAI/deepinfra.py +322 -0
- webscout/Provider/OPENAI/e2b.py +1414 -0
- webscout/Provider/OPENAI/exaai.py +417 -0
- webscout/Provider/OPENAI/exachat.py +444 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +326 -0
- webscout/Provider/OPENAI/groq.py +364 -0
- webscout/Provider/OPENAI/heckai.py +308 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +389 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +357 -0
- webscout/Provider/OPENAI/oivscode.py +287 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +477 -0
- webscout/Provider/OPENAI/sonus.py +304 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +364 -0
- webscout/Provider/OPENAI/uncovrAI.py +463 -0
- webscout/Provider/OPENAI/utils.py +318 -0
- webscout/Provider/OPENAI/venice.py +431 -0
- webscout/Provider/OPENAI/wisecat.py +387 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +365 -0
- webscout/Provider/OPENAI/yep.py +382 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/README.md +82 -0
- webscout/Provider/TTI/__init__.py +7 -0
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +10 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +475 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +174 -0
- webscout/Provider/ai4chat.py +174 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +224 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/client.py +70 -0
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +404 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +210 -0
- webscout/scout/core/scout.py +607 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +478 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +269 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
- webscout-8.2.9.dist-info/RECORD +289 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,357 @@
|
|
|
1
|
+
from curl_cffi.requests import Session
|
|
2
|
+
from curl_cffi import CurlError
|
|
3
|
+
import json
|
|
4
|
+
import time
|
|
5
|
+
import uuid
|
|
6
|
+
import re
|
|
7
|
+
import urllib.parse
|
|
8
|
+
from typing import List, Dict, Optional, Union, Generator, Any
|
|
9
|
+
|
|
10
|
+
from webscout.Extra.tempmail import get_random_email
|
|
11
|
+
from webscout.litagent import LitAgent
|
|
12
|
+
|
|
13
|
+
# Import base classes and utilities from OPENAI provider stack
|
|
14
|
+
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
15
|
+
from webscout.Provider.OPENAI.utils import (
|
|
16
|
+
ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
|
|
17
|
+
ChatCompletionMessage, CompletionUsage
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
# Attempt to import LitAgent for browser fingerprinting
|
|
21
|
+
try:
|
|
22
|
+
from webscout.litagent import LitAgent
|
|
23
|
+
except ImportError: # pragma: no cover - LitAgent optional
|
|
24
|
+
LitAgent = None
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class Completions(BaseCompletions):
    """TwoAI chat completions compatible with the OpenAI format."""

    def __init__(self, client: 'TwoAI'):
        # Keep a back-reference to the owning client for session/headers/timeout.
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, Any]],
        max_tokens: Optional[int] = 2049,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """Create a chat completion using TwoAI.

        Args:
            model: Model identifier (see ``TwoAI.AVAILABLE_MODELS``).
            messages: OpenAI-style list of message dicts.
            max_tokens: Maximum completion tokens requested from the API.
            stream: If True, return a generator of ``ChatCompletionChunk``.
            temperature: Optional sampling temperature.
            top_p: Optional nucleus-sampling value.
            **kwargs: Extra fields merged verbatim into the request payload.

        Returns:
            A ``ChatCompletion`` or, when ``stream`` is True, a generator of
            ``ChatCompletionChunk`` objects.

        Raises:
            IOError: If the HTTP request fails or the response cannot be parsed.
        """
        payload = {
            "model": model,
            "messages": messages,
            "max_tokens": max_tokens,
            "stream": stream,
        }
        # Only forward sampling knobs that were explicitly set.
        if temperature is not None:
            payload["temperature"] = temperature
        if top_p is not None:
            payload["top_p"] = top_p

        payload.update(kwargs)

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload)
        return self._create_non_stream(request_id, created_time, model, payload)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Yield ``ChatCompletionChunk`` objects parsed from the SSE response."""
        try:
            response = self._client.session.post(
                self._client.base_url,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=self._client.timeout,
            )
            response.raise_for_status()

            prompt_tokens = 0
            completion_tokens = 0
            total_tokens = 0

            for line in response.iter_lines():
                if not line:
                    continue
                decoded = line.decode("utf-8").strip()
                # SSE payload lines are prefixed with "data: "; skip anything else.
                if not decoded.startswith("data: "):
                    continue
                json_str = decoded[6:]
                if json_str == "[DONE]":
                    break
                try:
                    data = json.loads(json_str)
                except json.JSONDecodeError:
                    # Tolerate malformed keep-alive / partial lines.
                    continue

                choice_data = data.get("choices", [{}])[0]
                delta_data = choice_data.get("delta", {})
                finish_reason = choice_data.get("finish_reason")

                # Usage may arrive incrementally; keep the latest values seen.
                usage_data = data.get("usage", {})
                if usage_data:
                    prompt_tokens = usage_data.get("prompt_tokens", prompt_tokens)
                    completion_tokens = usage_data.get(
                        "completion_tokens", completion_tokens
                    )
                    total_tokens = usage_data.get("total_tokens", total_tokens)

                delta = ChoiceDelta(
                    content=delta_data.get("content"),
                    role=delta_data.get("role"),
                    tool_calls=delta_data.get("tool_calls"),
                )

                choice = Choice(
                    index=choice_data.get("index", 0),
                    delta=delta,
                    finish_reason=finish_reason,
                    logprobs=choice_data.get("logprobs"),
                )

                chunk = ChatCompletionChunk(
                    id=request_id,
                    choices=[choice],
                    created=created_time,
                    model=model,
                    system_fingerprint=data.get("system_fingerprint"),
                )

                yield chunk
        # FIX: the original had two identical `except Exception` clauses, making
        # the second unreachable. The first was clearly intended for transport
        # errors (the file imports CurlError but never used it).
        except CurlError as e:
            raise IOError(f"TwoAI request failed: {e}") from e
        except Exception as e:
            raise IOError(f"Error processing TwoAI stream: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> ChatCompletion:
        """Perform a blocking request and return a full ``ChatCompletion``."""
        try:
            response = self._client.session.post(
                self._client.base_url,
                headers=self._client.headers,
                json=payload,
                timeout=self._client.timeout,
            )
            response.raise_for_status()
            data = response.json()

            choices_data = data.get("choices", [])
            usage_data = data.get("usage", {})

            choices = []
            for choice_d in choices_data:
                message_d = choice_d.get("message", {})
                message = ChatCompletionMessage(
                    role=message_d.get("role", "assistant"),
                    content=message_d.get("content", ""),
                    tool_calls=message_d.get("tool_calls"),
                )
                choice = Choice(
                    index=choice_d.get("index", 0),
                    message=message,
                    finish_reason=choice_d.get("finish_reason", "stop"),
                )
                choices.append(choice)

            usage = CompletionUsage(
                prompt_tokens=usage_data.get("prompt_tokens", 0),
                completion_tokens=usage_data.get("completion_tokens", 0),
                total_tokens=usage_data.get("total_tokens", 0),
            )

            completion = ChatCompletion(
                id=request_id,
                choices=choices,
                created=created_time,
                model=data.get("model", model),
                usage=usage,
            )
            return completion
        # FIX: same dead duplicate `except Exception` as in _create_stream;
        # transport errors are now distinguished via CurlError.
        except CurlError as e:
            raise IOError(f"TwoAI request failed: {e}") from e
        except Exception as e:
            raise IOError(f"Error processing TwoAI response: {e}") from e
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
class Chat(BaseChat):
    """Namespace object exposing the ``completions`` endpoint group."""

    def __init__(self, client: 'TwoAI'):
        # Mirror the OpenAI SDK layout: client.chat.completions.create(...)
        self.completions = Completions(client)
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
class TwoAI(OpenAICompatibleProvider):
    """OpenAI-compatible client for the TwoAI API."""

    # Models accepted by the chat endpoint; exposed via the `models` property.
    AVAILABLE_MODELS = ["sutra-v2", "sutra-r0"]

    @staticmethod
    def generate_api_key() -> str:
        """
        Generate a new Two AI API key using a temporary email.

        Registers the throwaway address with the Two AI newsletter form, then
        polls the temp-mail inbox (up to ``max_attempts`` times, sleeping
        ``wait_time`` seconds between polls) for a confirmation email and
        extracts a ``sutra_...`` key from its body via regex.

        Raises:
            RuntimeError: If registration fails or no key arrives in time.
        """
        email, provider = get_random_email("tempmailio")
        # Loops.so newsletter-form endpoint used by www.two.ai for signups.
        loops_url = "https://app.loops.so/api/newsletter-form/cm7i4o92h057auy1o74cxbhxo"

        session = Session()
        session.headers.update({
            'User-Agent': LitAgent().random(),
            'Content-Type': 'application/x-www-form-urlencoded',
            'Origin': 'https://www.two.ai',
            'Referer': 'https://app.loops.so/',
        })

        form_data = {
            'email': email,
            'userGroup': 'Via Framer',
            'mailingLists': 'cm8ay9cic00x70kjv0bd34k66'
        }

        encoded_data = urllib.parse.urlencode(form_data)
        # impersonate="chrome120" makes curl_cffi mimic a real browser TLS stack.
        response = session.post(loops_url, data=encoded_data, impersonate="chrome120")

        if response.status_code != 200:
            raise RuntimeError(f"Failed to register for Two AI: {response.status_code} - {response.text}")

        max_attempts = 15
        attempt = 0
        api_key = None
        wait_time = 2  # seconds between inbox polls

        while attempt < max_attempts and not api_key:
            messages = provider.get_messages()
            for message in messages:
                subject = message.get('subject', '')
                # Sender may be a dict with 'address' or a plain string,
                # depending on the temp-mail backend's message schema.
                sender = ''
                if 'from' in message:
                    if isinstance(message['from'], dict):
                        sender = message['from'].get('address', '')
                    else:
                        sender = str(message['from'])
                elif 'sender' in message:
                    if isinstance(message['sender'], dict):
                        sender = message['sender'].get('address', '')
                    else:
                        sender = str(message['sender'])
                subject_match = any(keyword in subject.lower() for keyword in
                                    ['welcome', 'confirm', 'verify', 'api', 'key', 'sutra', 'two.ai', 'loops'])
                sender_match = any(keyword in sender.lower() for keyword in
                                   ['two.ai', 'sutra', 'loops.so', 'loops', 'no-reply', 'noreply'])
                # NOTE(review): is_confirmation is computed but never used below —
                # every message's body is scanned regardless. Confirm intent.
                is_confirmation = subject_match or sender_match

                # The body can live under several keys depending on the backend.
                content = None
                if 'body' in message:
                    content = message['body']
                elif 'content' in message and 'text' in message['content']:
                    content = message['content']['text']
                elif 'html' in message:
                    content = message['html']
                elif 'text' in message:
                    content = message['text']
                if not content:
                    continue

                # Robust API key extraction with multiple regex patterns
                # (strictest first, loosest last).
                patterns = [
                    r'sutra_[A-Za-z0-9]{60,70}',
                    r'sutra_[A-Za-z0-9]{30,}',
                    r'sutra_\S+',
                ]
                api_key_match = None
                for pat in patterns:
                    api_key_match = re.search(pat, content)
                    if api_key_match:
                        break
                # Also try to extract from labeled section
                if not api_key_match:
                    key_section_match = re.search(r'🔑 SUTRA API Key\s*([^\s]+)', content)
                    if key_section_match:
                        api_key_match = re.search(r'sutra_[A-Za-z0-9]+', key_section_match.group(1))
                if api_key_match:
                    api_key = api_key_match.group(0)
                    break
            if not api_key:
                attempt += 1
                time.sleep(wait_time)
        if not api_key:
            raise RuntimeError("Failed to get API key from confirmation email")
        return api_key

    def __init__(self, timeout: Optional[int] = None, browser: str = "chrome"):
        """Initialize the client, auto-provisioning a fresh API key.

        Args:
            timeout: Per-request timeout in seconds. NOTE(review): None means
                requests wait indefinitely — confirm this is intended.
            browser: Browser profile passed to LitAgent fingerprinting.
        """
        # Every instance registers a new throwaway key (network + email I/O).
        api_key = self.generate_api_key()
        self.timeout = timeout
        self.base_url = "https://api.two.ai/v2/chat/completions"
        self.api_key = api_key
        self.session = Session()

        headers: Dict[str, str] = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        }

        # Prefer full browser-fingerprint headers when LitAgent is available.
        if LitAgent is not None:
            try:
                agent = LitAgent()
                fingerprint = agent.generate_fingerprint(browser)
                headers.update({
                    "Accept": fingerprint["accept"],
                    "Accept-Encoding": "gzip, deflate, br, zstd",
                    "Accept-Language": fingerprint["accept_language"],
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                    "Origin": "https://chat.two.ai",
                    "Pragma": "no-cache",
                    "Referer": "https://chat.two.ai/",
                    "Sec-Fetch-Dest": "empty",
                    "Sec-Fetch-Mode": "cors",
                    "Sec-Fetch-Site": "same-site",
                    "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
                    "Sec-CH-UA-Mobile": "?0",
                    "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
                    "User-Agent": fingerprint["user_agent"],
                })
            except Exception:
                # Fallback minimal headers if fingerprinting fails
                headers.update({
                    "Accept": "application/json",
                    "Accept-Encoding": "gzip, deflate, br",
                    "Accept-Language": "en-US,en;q=0.9",
                    "User-Agent": "Mozilla/5.0",
                })
        else:
            # LitAgent unavailable: same minimal header set as the fallback above.
            headers.update({
                "Accept": "application/json",
                "Accept-Encoding": "gzip, deflate, br",
                "Accept-Language": "en-US,en;q=0.9",
                "User-Agent": "Mozilla/5.0",
            })

        self.headers = headers
        self.session.headers.update(headers)
        self.chat = Chat(self)

    @property
    def models(self):
        """Return an object with a ``.list()`` method, mimicking the OpenAI SDK."""
        # A throwaway namespace class is built on each access; `inner_self` is
        # the _ModelList instance, `self` (closed over) is the TwoAI client.
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()
|
|
347
|
+
|
|
348
|
+
if __name__ == "__main__":
    # Smoke test: stream a short greeting through the sutra-v2 model.
    from rich import print
    client = TwoAI()
    stream = client.chat.completions.create(
        model="sutra-v2",
        messages=[{"role": "user", "content": "Hello, how are you?"}],
        stream=True
    )
    for part in stream:
        print(part, end="")
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
# This file marks the directory as a Python package.
# FIX: removed a duplicate `from .flowith import *` (it appeared twice).
from .deepinfra import *
from .glider import *
from .chatgptclone import *
from .x0gpt import *
from .wisecat import *
from .venice import *
from .exaai import *
from .typegpt import *
from .scirachat import *
from .freeaichat import *
from .llmchatco import *
from .yep import *  # Add YEPCHAT
from .heckai import *
from .sonus import *
from .exachat import *
from .netwrck import *
from .standardinput import *
from .writecream import *
from .toolbaz import *
from .uncovrAI import *
from .opkfc import *
from .chatgpt import *
from .textpollinations import *
from .typefully import *  # Add TypefullyAI
from .e2b import *
from .multichat import *  # Add MultiChatAI
from .ai4chat import *  # Add AI4Chat
from .mcpcore import *
from .flowith import *
from .chatsandbox import *
from .c4ai import *
from .Cloudflare import *
from .NEMOTRON import *
from .BLACKBOXAI import *
from .copilot import *  # Add Microsoft Copilot
from .TwoAI import *
from .oivscode import *  # Add OnRender provider
from .Qwen3 import *
|
|
@@ -0,0 +1,293 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import uuid
|
|
3
|
+
import urllib.parse
|
|
4
|
+
from curl_cffi.requests import Session, RequestsError
|
|
5
|
+
from typing import List, Dict, Optional, Union, Generator, Any
|
|
6
|
+
|
|
7
|
+
# Import base classes and utility structures
|
|
8
|
+
from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
9
|
+
from .utils import (
|
|
10
|
+
ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
|
|
11
|
+
ChatCompletionMessage, CompletionUsage, count_tokens
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
# --- AI4Chat Client ---
|
|
15
|
+
|
|
16
|
+
class Completions(BaseCompletions):
|
|
17
|
+
    def __init__(self, client: 'AI4Chat'):
        # Back-reference to the owning client; supplies country/user_id defaults.
        self._client = client
|
|
19
|
+
|
|
20
|
+
def create(
|
|
21
|
+
self,
|
|
22
|
+
*,
|
|
23
|
+
model: str,
|
|
24
|
+
messages: List[Dict[str, str]],
|
|
25
|
+
max_tokens: Optional[int] = None,
|
|
26
|
+
stream: bool = False,
|
|
27
|
+
temperature: Optional[float] = None,
|
|
28
|
+
top_p: Optional[float] = None,
|
|
29
|
+
**kwargs: Any
|
|
30
|
+
) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
|
|
31
|
+
"""
|
|
32
|
+
Creates a model response for the given chat conversation.
|
|
33
|
+
Mimics openai.chat.completions.create
|
|
34
|
+
"""
|
|
35
|
+
# Use the format_prompt utility to format the conversation
|
|
36
|
+
from .utils import format_prompt
|
|
37
|
+
|
|
38
|
+
# Format the messages into a single string
|
|
39
|
+
conversation_prompt = format_prompt(messages, add_special_tokens=True, include_system=True)
|
|
40
|
+
|
|
41
|
+
# Set up request parameters
|
|
42
|
+
country_param = kwargs.get("country", self._client.country)
|
|
43
|
+
user_id_param = kwargs.get("user_id", self._client.user_id)
|
|
44
|
+
|
|
45
|
+
# Generate request ID and timestamp
|
|
46
|
+
request_id = f"chatcmpl-{uuid.uuid4()}"
|
|
47
|
+
created_time = int(time.time())
|
|
48
|
+
|
|
49
|
+
# AI4Chat doesn't support streaming, so we'll simulate it if requested
|
|
50
|
+
if stream:
|
|
51
|
+
return self._create_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param)
|
|
52
|
+
else:
|
|
53
|
+
return self._create_non_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param)
|
|
54
|
+
|
|
55
|
+
def _create_stream(
|
|
56
|
+
self, request_id: str, created_time: int, model: str,
|
|
57
|
+
conversation_prompt: str, country: str, user_id: str
|
|
58
|
+
) -> Generator[ChatCompletionChunk, None, None]:
|
|
59
|
+
"""Simulate streaming by breaking up the full response into fixed-size character chunks."""
|
|
60
|
+
try:
|
|
61
|
+
# Get the full response first
|
|
62
|
+
full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
|
|
63
|
+
|
|
64
|
+
# Track token usage
|
|
65
|
+
prompt_tokens = count_tokens(conversation_prompt)
|
|
66
|
+
completion_tokens = 0
|
|
67
|
+
|
|
68
|
+
# Stream fixed-size character chunks (e.g., 48 chars)
|
|
69
|
+
buffer = full_response
|
|
70
|
+
chunk_size = 48
|
|
71
|
+
while buffer:
|
|
72
|
+
chunk_text = buffer[:chunk_size]
|
|
73
|
+
buffer = buffer[chunk_size:]
|
|
74
|
+
completion_tokens += count_tokens(chunk_text)
|
|
75
|
+
|
|
76
|
+
if chunk_text.strip():
|
|
77
|
+
# Create the delta object
|
|
78
|
+
delta = ChoiceDelta(
|
|
79
|
+
content=chunk_text,
|
|
80
|
+
role="assistant",
|
|
81
|
+
tool_calls=None
|
|
82
|
+
)
|
|
83
|
+
|
|
84
|
+
# Create the choice object
|
|
85
|
+
choice = Choice(
|
|
86
|
+
index=0,
|
|
87
|
+
delta=delta,
|
|
88
|
+
finish_reason=None,
|
|
89
|
+
logprobs=None
|
|
90
|
+
)
|
|
91
|
+
|
|
92
|
+
# Create the chunk object
|
|
93
|
+
chunk = ChatCompletionChunk(
|
|
94
|
+
id=request_id,
|
|
95
|
+
choices=[choice],
|
|
96
|
+
created=created_time,
|
|
97
|
+
model=model,
|
|
98
|
+
system_fingerprint=None
|
|
99
|
+
)
|
|
100
|
+
|
|
101
|
+
yield chunk
|
|
102
|
+
|
|
103
|
+
# Final chunk with finish_reason="stop"
|
|
104
|
+
delta = ChoiceDelta(
|
|
105
|
+
content=None,
|
|
106
|
+
role=None,
|
|
107
|
+
tool_calls=None
|
|
108
|
+
)
|
|
109
|
+
|
|
110
|
+
choice = Choice(
|
|
111
|
+
index=0,
|
|
112
|
+
delta=delta,
|
|
113
|
+
finish_reason="stop",
|
|
114
|
+
logprobs=None
|
|
115
|
+
)
|
|
116
|
+
|
|
117
|
+
chunk = ChatCompletionChunk(
|
|
118
|
+
id=request_id,
|
|
119
|
+
choices=[choice],
|
|
120
|
+
created=created_time,
|
|
121
|
+
model=model,
|
|
122
|
+
system_fingerprint=None
|
|
123
|
+
)
|
|
124
|
+
|
|
125
|
+
yield chunk
|
|
126
|
+
|
|
127
|
+
except RequestsError as e:
|
|
128
|
+
print(f"Error during AI4Chat stream request: {e}")
|
|
129
|
+
raise IOError(f"AI4Chat request failed: {e}") from e
|
|
130
|
+
except Exception as e:
|
|
131
|
+
print(f"Unexpected error during AI4Chat stream request: {e}")
|
|
132
|
+
raise IOError(f"AI4Chat request failed: {e}") from e
|
|
133
|
+
|
|
134
|
+
def _create_non_stream(
    self, request_id: str, created_time: int, model: str,
    conversation_prompt: str, country: str, user_id: str
) -> ChatCompletion:
    """Get a complete response from AI4Chat.

    Args:
        request_id: Identifier echoed back in the completion envelope.
        created_time: Unix timestamp placed in the envelope.
        model: Model name echoed back in the envelope.
        conversation_prompt: Fully formatted prompt sent to the API.
        country: Country parameter forwarded to the API.
        user_id: User identifier forwarded to the API.

    Returns:
        An OpenAI-style ChatCompletion with a single "stop" choice and
        token-usage estimates from count_tokens().

    Raises:
        IOError: Wraps any request or unexpected failure, chaining the cause.
    """
    try:
        # One-shot fetch of the assistant reply text.
        reply_text = self._get_ai4chat_response(conversation_prompt, country, user_id)

        # Token accounting — estimates, since the API reports no usage.
        prompt_tok = count_tokens(conversation_prompt)
        completion_tok = count_tokens(reply_text)

        # Assemble the OpenAI-compatible completion envelope inline.
        return ChatCompletion(
            id=request_id,
            choices=[
                Choice(
                    index=0,
                    message=ChatCompletionMessage(
                        role="assistant",
                        content=reply_text,
                    ),
                    finish_reason="stop",
                )
            ],
            created=created_time,
            model=model,
            usage=CompletionUsage(
                prompt_tokens=prompt_tok,
                completion_tokens=completion_tok,
                total_tokens=prompt_tok + completion_tok,
            ),
        )
    except RequestsError as e:
        print(f"Error during AI4Chat non-stream request: {e}")
        raise IOError(f"AI4Chat request failed: {e}") from e
    except Exception as e:
        print(f"Unexpected error during AI4Chat non-stream request: {e}")
        raise IOError(f"AI4Chat request failed: {e}") from e
|
|
185
|
+
|
|
186
|
+
def _get_ai4chat_response(self, prompt: str, country: str, user_id: str) -> str:
    """Make the actual API request to AI4Chat and return the cleaned text.

    Args:
        prompt: Full conversation prompt to send.
        country: Country parameter forwarded to the API.
        user_id: User identifier forwarded to the API.

    Returns:
        The response body with any surrounding double quotes stripped and
        escaped newline sequences expanded to real newlines.

    Raises:
        IOError: If the HTTP request fails or returns an error status.
    """
    # URL encode parameters so special characters survive the query string.
    encoded_text = urllib.parse.quote(prompt)
    encoded_country = urllib.parse.quote(country)
    encoded_user_id = urllib.parse.quote(user_id)

    # Construct the API URL — the endpoint takes everything as query params.
    url = f"{self._client.api_endpoint}?text={encoded_text}&country={encoded_country}&user_id={encoded_user_id}"

    # Make the request
    try:
        response = self._client.session.get(url, headers=self._client.headers, timeout=self._client.timeout)
        response.raise_for_status()
    except RequestsError as e:
        # Chain the cause so callers can inspect the original failure,
        # matching the `from e` convention used elsewhere in this class.
        raise IOError(f"Failed to generate response: {e}") from e

    # Process the response text
    response_text = response.text

    # Remove surrounding quotes if present (the API returns a JSON-style
    # quoted string body).
    if response_text.startswith('"'):
        response_text = response_text[1:]
    if response_text.endswith('"'):
        response_text = response_text[:-1]

    # Expand escaped newlines. A single pass suffices: the previous chained
    # .replace('\\n\\n', '\n\n') was dead code, because after this replace
    # no literal backslash-n sequences remain to match.
    response_text = response_text.replace('\\n', '\n')

    return response_text
|
|
216
|
+
|
|
217
|
+
class Chat(BaseChat):
    """Chat namespace mirroring the OpenAI client layout (client.chat.completions)."""

    def __init__(self, client: 'AI4Chat'):
        # Expose chat.completions.create(...) through the Completions wrapper.
        self.completions = Completions(client)
|
|
220
|
+
|
|
221
|
+
class AI4Chat(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for AI4Chat API.

    Usage:
        client = AI4Chat()
        response = client.chat.completions.create(
            model="default",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    # The upstream API exposes a single unnamed model.
    AVAILABLE_MODELS = ["default"]

    def __init__(
        self,
        timeout: int = 30,
        proxies: dict = None,
        system_prompt: str = "You are a helpful and informative AI assistant.",
        country: str = "Asia",
        user_id: str = "usersmjb2oaz7y"
    ):
        """
        Initialize the AI4Chat client.

        Args:
            timeout: Request timeout in seconds
            proxies: Optional proxy configuration; None means no proxies
            system_prompt: System prompt to guide the AI's behavior
            country: Country parameter for API
            user_id: User ID for API
        """
        self.timeout = timeout
        # Fix: the previous `proxies: dict = {}` default was a mutable
        # default argument — one shared dict object across every call.
        # A None sentinel preserves the old behavior without the pitfall.
        self.proxies = proxies if proxies is not None else {}
        self.system_prompt = system_prompt
        self.country = country
        self.user_id = user_id

        # API endpoint
        self.api_endpoint = "https://yw85opafq6.execute-api.us-east-1.amazonaws.com/default/boss_mode_15aug"

        # Initialize session
        self.session = Session(timeout=timeout, proxies=self.proxies)

        # Set headers (browser-like values the upstream endpoint expects)
        self.headers = {
            "Accept": "*/*",
            "Accept-Language": "id-ID,id;q=0.9",
            "Origin": "https://www.ai4chat.co",
            "Priority": "u=1, i",
            "Referer": "https://www.ai4chat.co/",
            "Sec-CH-UA": '"Chromium";v="131", "Not_A Brand";v="24", "Microsoft Edge Simulate";v="131", "Lemur";v="131"',
            "Sec-CH-UA-Mobile": "?1",
            "Sec-CH-UA-Platform": '"Android"',
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "cross-site",
            "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Mobile Safari/537.36"
        }

        # Update session headers
        self.session.headers.update(self.headers)

        # Initialize chat interface
        self.chat = Chat(self)

    @property
    def models(self):
        # Minimal OpenAI-style models facade: client.models.list().
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()
|