webscout-8.2.6-py3-none-any.whl → webscout-8.2.8-py3-none-any.whl
This diff shows the published contents of two package versions as they appear in their public registry and is provided for informational purposes only.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -239
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
- webscout/Extra/autocoder/autocoder.py +309 -114
- webscout/Extra/autocoder/autocoder_utiles.py +15 -15
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/weather.md +281 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Provider/AISEARCH/DeepFind.py +41 -37
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +0 -1
- webscout/Provider/AISEARCH/genspark_search.py +228 -86
- webscout/Provider/AISEARCH/hika_search.py +11 -11
- webscout/Provider/AISEARCH/scira_search.py +324 -322
- webscout/Provider/AllenAI.py +7 -14
- webscout/Provider/Blackboxai.py +518 -74
- webscout/Provider/Cloudflare.py +0 -1
- webscout/Provider/Deepinfra.py +23 -21
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/GizAI.py +15 -5
- webscout/Provider/Glider.py +11 -8
- webscout/Provider/HeckAI.py +80 -52
- webscout/Provider/Koboldai.py +7 -4
- webscout/Provider/LambdaChat.py +2 -2
- webscout/Provider/Marcus.py +10 -18
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +8 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -286
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +217 -14
- webscout/Provider/OPENAI/c4ai.py +373 -367
- webscout/Provider/OPENAI/chatgpt.py +7 -0
- webscout/Provider/OPENAI/chatgptclone.py +7 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +30 -20
- webscout/Provider/OPENAI/e2b.py +6 -0
- webscout/Provider/OPENAI/exaai.py +7 -0
- webscout/Provider/OPENAI/exachat.py +6 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -352
- webscout/Provider/OPENAI/glider.py +323 -316
- webscout/Provider/OPENAI/groq.py +361 -354
- webscout/Provider/OPENAI/heckai.py +30 -64
- webscout/Provider/OPENAI/llmchatco.py +8 -0
- webscout/Provider/OPENAI/mcpcore.py +7 -0
- webscout/Provider/OPENAI/multichat.py +8 -0
- webscout/Provider/OPENAI/netwrck.py +356 -350
- webscout/Provider/OPENAI/opkfc.py +8 -0
- webscout/Provider/OPENAI/scirachat.py +471 -462
- webscout/Provider/OPENAI/sonus.py +9 -0
- webscout/Provider/OPENAI/standardinput.py +9 -1
- webscout/Provider/OPENAI/textpollinations.py +339 -329
- webscout/Provider/OPENAI/toolbaz.py +7 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -346
- webscout/Provider/OPENAI/uncovrAI.py +7 -0
- webscout/Provider/OPENAI/utils.py +103 -7
- webscout/Provider/OPENAI/venice.py +12 -0
- webscout/Provider/OPENAI/wisecat.py +19 -19
- webscout/Provider/OPENAI/writecream.py +7 -0
- webscout/Provider/OPENAI/x0gpt.py +7 -0
- webscout/Provider/OPENAI/yep.py +50 -21
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/speechma.py +500 -100
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TeachAnything.py +3 -7
- webscout/Provider/TextPollinationsAI.py +4 -2
- webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Writecream.py +11 -2
- webscout/Provider/__init__.py +8 -14
- webscout/Provider/ai4chat.py +4 -58
- webscout/Provider/asksteve.py +17 -9
- webscout/Provider/cerebras.py +3 -1
- webscout/Provider/koala.py +170 -268
- webscout/Provider/llmchat.py +3 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +7 -4
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +4 -2
- webscout/Provider/typefully.py +23 -151
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/scout/README.md +402 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +134 -54
- webscout/zeroart/base.py +19 -13
- webscout/zeroart/effects.py +101 -99
- webscout/zeroart/fonts.py +1239 -816
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.8.dist-info/entry_points.txt +3 -0
- webscout-8.2.8.dist-info/top_level.txt +1 -0
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/ElectronHub.py +0 -773
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -249
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/scout/core.py +0 -881
- webscout-8.2.6.dist-info/entry_points.txt +0 -3
- webscout-8.2.6.dist-info/top_level.txt +0 -2
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/OPENAI/__init__.py

@@ -22,7 +22,15 @@ from .uncovrAI import *
 from .opkfc import *
 from .chatgpt import *
 from .textpollinations import *
+from .typefully import * # Add TypefullyAI
 from .e2b import *
 from .multichat import * # Add MultiChatAI
 from .ai4chat import * # Add AI4Chat
 from .mcpcore import *
+from .flowith import *
+from .chatsandbox import *
+from .c4ai import *
+from .flowith import *
+from .Cloudflare import *
+from .NEMOTRON import *
+from .BLACKBOXAI import *
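The hunk above adds eight wildcard re-exports to webscout/Provider/OPENAI/__init__.py, matching the +8 -0 count in the file list (note that `from .flowith import *` appears twice in the published source). A minimal usage sketch of one newly exported provider follows; the class name `TypefullyAI` is taken from the `# Add TypefullyAI` comment, while the model id and constructor arguments below are assumptions based on the OpenAI-compatible `chat.completions.create` interface shown in the ai4chat.py diff further down:

    # Hypothetical sketch, not taken from the diff itself: assumes TypefullyAI
    # implements the same OpenAICompatibleProvider interface as the other
    # providers in this package.
    from webscout.Provider.OPENAI import TypefullyAI

    client = TypefullyAI()
    response = client.chat.completions.create(
        model="default",  # assumed model id; check TypefullyAI.AVAILABLE_MODELS
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response.choices[0].message.content)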
webscout/Provider/OPENAI/ai4chat.py

@@ -1,286 +1,293 @@
-import time
-import uuid
-import urllib.parse
-from curl_cffi.requests import Session, RequestsError
-from typing import List, Dict, Optional, Union, Generator, Any
-
-# Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
-    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage
-)
-
-# --- AI4Chat Client ---
-
-class Completions(BaseCompletions):
-    def __init__(self, client: 'AI4Chat'):
-        self._client = client
-
-    def create(
-        self,
-        *,
-        model: str,
-        messages: List[Dict[str, str]],
-        max_tokens: Optional[int] = None,
-        stream: bool = False,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        **kwargs: Any
-    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-        """
-        Creates a model response for the given chat conversation.
-        Mimics openai.chat.completions.create
-        """
-        # Use the format_prompt utility to format the conversation
-        from .utils import format_prompt
-
-        # Format the messages into a single string
-        conversation_prompt = format_prompt(messages, add_special_tokens=True, include_system=True)
-
-        # Set up request parameters
-        country_param = kwargs.get("country", self._client.country)
-        user_id_param = kwargs.get("user_id", self._client.user_id)
-
-        # Generate request ID and timestamp
-        request_id = f"chatcmpl-{uuid.uuid4()}"
-        created_time = int(time.time())
-
-        # AI4Chat doesn't support streaming, so we'll simulate it if requested
-        if stream:
-            return self._create_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param)
-        else:
-            return self._create_non_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param)
-
-    def _create_stream(
-        self, request_id: str, created_time: int, model: str,
-        conversation_prompt: str, country: str, user_id: str
-    ) -> Generator[ChatCompletionChunk, None, None]:
-        """Simulate streaming by breaking up the full response."""
-        try:
-            # Get the full response first
-            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
-
-            # Break it into chunks for simulated streaming
-            words = full_response.split()
-            chunk_size = max(1, len(words) // 10) # Divide into ~10 chunks
-
-            # Track token usage
-            prompt_tokens = len(conversation_prompt.split())
-            completion_tokens = 0
-
-            # Stream chunks
-            for i in range(0, len(words), chunk_size):
-                chunk_text = " ".join(words[i:i+chunk_size])
-                completion_tokens += len(chunk_text.split())
-
-                # Create the delta object
-                delta = ChoiceDelta(
-                    content=chunk_text,
-                    role="assistant",
-                    tool_calls=None
-                )
-
-                # Create the choice object
-                choice = Choice(
-                    index=0,
-                    delta=delta,
-                    finish_reason=None,
-                    logprobs=None
-                )
-
-                # Create the chunk object
-                chunk = ChatCompletionChunk(
-                    id=request_id,
-                    choices=[choice],
-                    created=created_time,
-                    model=model,
-                    system_fingerprint=None
-                )
-
-                yield chunk
-
-            # Final chunk with finish_reason="stop"
-            delta = ChoiceDelta(
-                content=None,
-                role=None,
-                tool_calls=None
-            )
-
-            choice = Choice(
-                index=0,
-                delta=delta,
-                finish_reason="stop",
-                logprobs=None
-            )
-
-            chunk = ChatCompletionChunk(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-                system_fingerprint=None
-            )
-
-            yield chunk
-
-        except RequestsError as e:
-            print(f"Error during AI4Chat stream request: {e}")
-            raise IOError(f"AI4Chat request failed: {e}") from e
-        except Exception as e:
-            print(f"Unexpected error during AI4Chat stream request: {e}")
-            raise IOError(f"AI4Chat request failed: {e}") from e
-
-    def _create_non_stream(
-        self, request_id: str, created_time: int, model: str,
-        conversation_prompt: str, country: str, user_id: str
-    ) -> ChatCompletion:
-        """Get a complete response from AI4Chat."""
-        try:
-            # Get the full response
-            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
-
-            # Estimate token counts
-            prompt_tokens = len(conversation_prompt.split())
-            completion_tokens = len(full_response.split())
-            total_tokens = prompt_tokens + completion_tokens
-
-            # Create the message object
-            message = ChatCompletionMessage(
-                role="assistant",
-                content=full_response
-            )
-
-            # Create the choice object
-            choice = Choice(
-                index=0,
-                message=message,
-                finish_reason="stop"
-            )
-
-            # Create the usage object
-            usage = CompletionUsage(
-                prompt_tokens=prompt_tokens,
-                completion_tokens=completion_tokens,
-                total_tokens=total_tokens
-            )
-
-            # Create the completion object
-            completion = ChatCompletion(
-                id=request_id,
-                choices=[choice],
-                created=created_time,
-                model=model,
-                usage=usage,
-            )
-
-            return completion
-
-        except RequestsError as e:
-            print(f"Error during AI4Chat non-stream request: {e}")
-            raise IOError(f"AI4Chat request failed: {e}") from e
-        except Exception as e:
-            print(f"Unexpected error during AI4Chat non-stream request: {e}")
-            raise IOError(f"AI4Chat request failed: {e}") from e
-
-    def _get_ai4chat_response(self, prompt: str, country: str, user_id: str) -> str:
-        """Make the actual API request to AI4Chat."""
-        # URL encode parameters
-        encoded_text = urllib.parse.quote(prompt)
-        encoded_country = urllib.parse.quote(country)
-        encoded_user_id = urllib.parse.quote(user_id)
-
-        # Construct the API URL
-        url = f"{self._client.api_endpoint}?text={encoded_text}&country={encoded_country}&user_id={encoded_user_id}"
-
-        # Make the request
-        try:
-            response = self._client.session.get(url, headers=self._client.headers, timeout=self._client.timeout)
-            response.raise_for_status()
-        except RequestsError as e:
-            raise IOError(f"Failed to generate response: {e}")
-
-        # Process the response text
-        response_text = response.text
-
-        # Remove surrounding quotes if present
-        if response_text.startswith('"'):
-            response_text = response_text[1:]
-        if response_text.endswith('"'):
-            response_text = response_text[:-1]
-
-        # Replace escaped newlines
-        response_text = response_text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
-
-        return response_text
-
-class Chat(BaseChat):
-    def __init__(self, client: 'AI4Chat'):
-        self.completions = Completions(client)
-
-class AI4Chat(OpenAICompatibleProvider):
-    """
-    OpenAI-compatible client for AI4Chat API.
-
-    Usage:
-        client = AI4Chat()
-        response = client.chat.completions.create(
-            model="default",
-            messages=[{"role": "user", "content": "Hello!"}]
-        )
-        print(response.choices[0].message.content)
-    """
-
-    AVAILABLE_MODELS = ["default"]
-
-    def __init__(
-        self,
-        timeout: int = 30,
-        proxies: dict = {},
-        system_prompt: str = "You are a helpful and informative AI assistant.",
-        country: str = "Asia",
-        user_id: str = "usersmjb2oaz7y"
-    ):
-        """
-        Initialize the AI4Chat client.
-
-        Args:
-            timeout: Request timeout in seconds
-            proxies: Optional proxy configuration
-            system_prompt: System prompt to guide the AI's behavior
-            country: Country parameter for API
-            user_id: User ID for API
-        """
-        self.timeout = timeout
-        self.proxies = proxies
-        self.system_prompt = system_prompt
-        self.country = country
-        self.user_id = user_id
-
-        # API endpoint
-        self.api_endpoint = "https://yw85opafq6.execute-api.us-east-1.amazonaws.com/default/boss_mode_15aug"
-
-        # Initialize session
-        self.session = Session(timeout=timeout, proxies=proxies)
-
-        # Set headers
-        self.headers = {
-            "Accept": "*/*",
-            "Accept-Language": "id-ID,id;q=0.9",
-            "Origin": "https://www.ai4chat.co",
-            "Priority": "u=1, i",
-            "Referer": "https://www.ai4chat.co/",
-            "Sec-CH-UA": '"Chromium";v="131", "Not_A Brand";v="24", "Microsoft Edge Simulate";v="131", "Lemur";v="131"',
-            "Sec-CH-UA-Mobile": "?1",
-            "Sec-CH-UA-Platform": '"Android"',
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "cross-site",
-            "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Mobile Safari/537.36"
-        }
-
-        # Update session headers
-        self.session.headers.update(self.headers)
-
-        # Initialize chat interface
-        self.chat = Chat(self)
+import time
+import uuid
+import urllib.parse
+from curl_cffi.requests import Session, RequestsError
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+
+# --- AI4Chat Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'AI4Chat'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Use the format_prompt utility to format the conversation
+        from .utils import format_prompt
+
+        # Format the messages into a single string
+        conversation_prompt = format_prompt(messages, add_special_tokens=True, include_system=True)
+
+        # Set up request parameters
+        country_param = kwargs.get("country", self._client.country)
+        user_id_param = kwargs.get("user_id", self._client.user_id)
+
+        # Generate request ID and timestamp
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        # AI4Chat doesn't support streaming, so we'll simulate it if requested
+        if stream:
+            return self._create_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param)
+        else:
+            return self._create_non_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str,
+        conversation_prompt: str, country: str, user_id: str
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        """Simulate streaming by breaking up the full response."""
+        try:
+            # Get the full response first
+            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
+
+            # Break it into chunks for simulated streaming
+            words = full_response.split()
+            chunk_size = max(1, len(words) // 10) # Divide into ~10 chunks
+
+            # Track token usage
+            prompt_tokens = len(conversation_prompt.split())
+            completion_tokens = 0
+
+            # Stream chunks
+            for i in range(0, len(words), chunk_size):
+                chunk_text = " ".join(words[i:i+chunk_size])
+                completion_tokens += len(chunk_text.split())
+
+                # Create the delta object
+                delta = ChoiceDelta(
+                    content=chunk_text,
+                    role="assistant",
+                    tool_calls=None
+                )
+
+                # Create the choice object
+                choice = Choice(
+                    index=0,
+                    delta=delta,
+                    finish_reason=None,
+                    logprobs=None
+                )
+
+                # Create the chunk object
+                chunk = ChatCompletionChunk(
+                    id=request_id,
+                    choices=[choice],
+                    created=created_time,
+                    model=model,
+                    system_fingerprint=None
+                )
+
+                yield chunk
+
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(
+                content=None,
+                role=None,
+                tool_calls=None
+            )
+
+            choice = Choice(
+                index=0,
+                delta=delta,
+                finish_reason="stop",
+                logprobs=None
+            )
+
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+
+            yield chunk
+
+        except RequestsError as e:
+            print(f"Error during AI4Chat stream request: {e}")
+            raise IOError(f"AI4Chat request failed: {e}") from e
+        except Exception as e:
+            print(f"Unexpected error during AI4Chat stream request: {e}")
+            raise IOError(f"AI4Chat request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str,
+        conversation_prompt: str, country: str, user_id: str
+    ) -> ChatCompletion:
+        """Get a complete response from AI4Chat."""
+        try:
+            # Get the full response
+            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
+
+            # Estimate token counts
+            prompt_tokens = len(conversation_prompt.split())
+            completion_tokens = len(full_response.split())
+            total_tokens = prompt_tokens + completion_tokens
+
+            # Create the message object
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_response
+            )
+
+            # Create the choice object
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+
+            # Create the usage object
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+
+            return completion
+
+        except RequestsError as e:
+            print(f"Error during AI4Chat non-stream request: {e}")
+            raise IOError(f"AI4Chat request failed: {e}") from e
+        except Exception as e:
+            print(f"Unexpected error during AI4Chat non-stream request: {e}")
+            raise IOError(f"AI4Chat request failed: {e}") from e
+
+    def _get_ai4chat_response(self, prompt: str, country: str, user_id: str) -> str:
+        """Make the actual API request to AI4Chat."""
+        # URL encode parameters
+        encoded_text = urllib.parse.quote(prompt)
+        encoded_country = urllib.parse.quote(country)
+        encoded_user_id = urllib.parse.quote(user_id)
+
+        # Construct the API URL
+        url = f"{self._client.api_endpoint}?text={encoded_text}&country={encoded_country}&user_id={encoded_user_id}"
+
+        # Make the request
+        try:
+            response = self._client.session.get(url, headers=self._client.headers, timeout=self._client.timeout)
+            response.raise_for_status()
+        except RequestsError as e:
+            raise IOError(f"Failed to generate response: {e}")
+
+        # Process the response text
+        response_text = response.text
+
+        # Remove surrounding quotes if present
+        if response_text.startswith('"'):
+            response_text = response_text[1:]
+        if response_text.endswith('"'):
+            response_text = response_text[:-1]
+
+        # Replace escaped newlines
+        response_text = response_text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+        return response_text
+
+class Chat(BaseChat):
+    def __init__(self, client: 'AI4Chat'):
+        self.completions = Completions(client)
+
+class AI4Chat(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for AI4Chat API.
+
+    Usage:
+        client = AI4Chat()
+        response = client.chat.completions.create(
+            model="default",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+        print(response.choices[0].message.content)
+    """
+
+    AVAILABLE_MODELS = ["default"]
+
+    def __init__(
+        self,
+        timeout: int = 30,
+        proxies: dict = {},
+        system_prompt: str = "You are a helpful and informative AI assistant.",
+        country: str = "Asia",
+        user_id: str = "usersmjb2oaz7y"
+    ):
+        """
+        Initialize the AI4Chat client.
+
+        Args:
+            timeout: Request timeout in seconds
+            proxies: Optional proxy configuration
+            system_prompt: System prompt to guide the AI's behavior
+            country: Country parameter for API
+            user_id: User ID for API
+        """
+        self.timeout = timeout
+        self.proxies = proxies
+        self.system_prompt = system_prompt
+        self.country = country
+        self.user_id = user_id
+
+        # API endpoint
+        self.api_endpoint = "https://yw85opafq6.execute-api.us-east-1.amazonaws.com/default/boss_mode_15aug"
+
+        # Initialize session
+        self.session = Session(timeout=timeout, proxies=proxies)
+
+        # Set headers
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Language": "id-ID,id;q=0.9",
+            "Origin": "https://www.ai4chat.co",
+            "Priority": "u=1, i",
+            "Referer": "https://www.ai4chat.co/",
+            "Sec-CH-UA": '"Chromium";v="131", "Not_A Brand";v="24", "Microsoft Edge Simulate";v="131", "Lemur";v="131"',
+            "Sec-CH-UA-Mobile": "?1",
+            "Sec-CH-UA-Platform": '"Android"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "cross-site",
+            "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Mobile Safari/537.36"
+        }
+
+        # Update session headers
+        self.session.headers.update(self.headers)
+
+        # Initialize chat interface
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
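The hunk above removes and re-adds ai4chat.py in full, but the deleted and restored bodies are line-for-line identical through line 286; the net change is the `models` property appended to the `AI4Chat` class (new lines 287-293). A minimal sketch of what the new property enables, using only names that appear in the diff:

    # Sketch of the new models accessor added in 8.2.8.
    from webscout.Provider.OPENAI.ai4chat import AI4Chat

    client = AI4Chat()
    # models.list() returns the class-level AVAILABLE_MODELS list.
    print(client.models.list())  # -> ["default"]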