webscout-8.2.3-py3-none-any.whl → webscout-8.2.5-py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as possibly problematic.
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/gguf.py +2 -0
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AISEARCH/scira_search.py +2 -5
- webscout/Provider/Aitopia.py +75 -51
- webscout/Provider/AllenAI.py +181 -147
- webscout/Provider/ChatGPTClone.py +97 -86
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +79 -32
- webscout/Provider/Deepinfra.py +135 -94
- webscout/Provider/ElectronHub.py +103 -39
- webscout/Provider/ExaChat.py +36 -20
- webscout/Provider/GPTWeb.py +103 -47
- webscout/Provider/GithubChat.py +52 -49
- webscout/Provider/GizAI.py +283 -0
- webscout/Provider/Glider.py +39 -28
- webscout/Provider/Groq.py +222 -91
- webscout/Provider/HeckAI.py +93 -69
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +104 -79
- webscout/Provider/LambdaChat.py +142 -123
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +95 -37
- webscout/Provider/Netwrck.py +94 -52
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/exachat.py +4 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OPENAI/scirachat.py +2 -4
- webscout/Provider/OPENAI/textpollinations.py +20 -22
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +178 -93
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/StandardInput.py +42 -30
- webscout/Provider/TeachAnything.py +95 -52
- webscout/Provider/TextPollinationsAI.py +138 -78
- webscout/Provider/TwoAI.py +162 -81
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/Venice.py +97 -58
- webscout/Provider/VercelAI.py +33 -14
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +9 -27
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/asksteve.py +53 -44
- webscout/Provider/cerebras.py +77 -31
- webscout/Provider/chatglm.py +47 -37
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +109 -60
- webscout/Provider/granite.py +102 -54
- webscout/Provider/hermes.py +95 -48
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +113 -54
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +110 -115
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +67 -28
- webscout/Provider/scira_chat.py +49 -30
- webscout/Provider/scnet.py +106 -53
- webscout/Provider/searchchat.py +87 -88
- webscout/Provider/sonus.py +113 -63
- webscout/Provider/toolbaz.py +115 -82
- webscout/Provider/turboseek.py +90 -43
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +85 -35
- webscout/Provider/typegpt.py +118 -61
- webscout/Provider/uncovr.py +132 -76
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/cli.py +256 -0
- webscout/conversation.py +34 -22
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/METADATA +183 -50
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/RECORD +97 -113
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/WHEEL +1 -1
- webscout-8.2.5.dist-info/entry_points.txt +3 -0
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info}/top_level.txt +0 -1
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- webscout-8.2.3.dist-info/entry_points.txt +0 -5
- {webscout-8.2.3.dist-info → webscout-8.2.5.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/Netwrck.py
CHANGED
@@ -1,14 +1,11 @@
-import time
-import uuid
-import requests
-import json
 from typing import Any, Dict, Optional, Generator, Union
-from
-from datetime import date
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
+# Replace requests with curl_cffi
+from curl_cffi.requests import Session # Import Session
+from curl_cffi import CurlError # Import CurlError

 class Netwrck(Provider):
     """
@@ -21,9 +18,10 @@ class Netwrck(Provider):
         "x-ai/grok-2",
         "anthropic/claude-3-7-sonnet-20250219",
         "sao10k/l3-euryale-70b",
-        "openai/gpt-
+        "openai/gpt-4.1-mini",
         "gryphe/mythomax-l2-13b",
         "google/gemini-pro-1.5",
+        "google/gemini-2.5-flash-preview-04-17",
         "nvidia/llama-3.1-nemotron-70b-instruct",
         "deepseek/deepseek-r1",
         "deepseek/deepseek-chat"
@@ -34,7 +32,7 @@ class Netwrck(Provider):
         self,
         model: str = "anthropic/claude-3-7-sonnet-20250219",
         is_conversation: bool = True,
-        max_tokens: int = 4096,
+        max_tokens: int = 4096, # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: Optional[str] = None,
         filepath: Optional[str] = None,
@@ -43,17 +41,18 @@ class Netwrck(Provider):
         history_offset: int = 0,
         act: Optional[str] = None,
         system_prompt: str = "You are a helpful assistant.",
-        temperature: float = 0.7,
-        top_p: float = 0.8
+        temperature: float = 0.7, # Note: temperature is not used by this API
+        top_p: float = 0.8 # Note: top_p is not used by this API
     ):
         """Initializes the Netwrck API client."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

         self.model = model
-        self.model_name = model
+        self.model_name = model
         self.system_prompt = system_prompt
-
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
@@ -61,7 +60,7 @@ class Netwrck(Provider):
         self.temperature = temperature
         self.top_p = top_p

-        self.agent = LitAgent()
+        self.agent = LitAgent() # Keep for potential future use or other headers
         self.headers = {
             'authority': 'netwrck.com',
             'accept': '*/*',
@@ -69,11 +68,14 @@ class Netwrck(Provider):
             'content-type': 'application/json',
             'origin': 'https://netwrck.com',
             'referer': 'https://netwrck.com/',
-            'user-agent': self.agent.random()
+            'user-agent': self.agent.random()
+            # Add sec-ch-ua headers if needed for impersonation consistency
         }

+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
         self.proxies = proxies or {}
+        self.session.proxies = self.proxies # Assign proxies directly

         Conversation.intro = (
             AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
@@ -88,11 +90,20 @@ class Netwrck(Provider):
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )

+    @staticmethod
+    def _netwrck_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Removes surrounding quotes and handles potential escapes."""
+        if isinstance(chunk, str):
+            text = chunk.strip('"')
+            # Handle potential unicode escapes if they appear
+            # text = text.encode().decode('unicode_escape') # Uncomment if needed
+            return text
+        return None
     def ask(
         self,
         prompt: str,
         stream: bool = False,
-        raw: bool = False,
+        raw: bool = False, # Keep raw param for interface consistency
         optimizer: Optional[str] = None,
         conversationally: bool = False,
     ) -> Union[Dict[str, Any], Generator]:
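The new `_netwrck_extractor` above is deliberately minimal: Netwrck streams plain text chunks wrapped in double quotes, and the extractor strips those quotes before the chunk is yielded. A standalone sketch of that behaviour, using illustrative input rather than captured traffic:

# Sketch of the extractor's effect on a raw Netwrck chunk (illustrative data)
raw_chunk = '"Hello from the model"'
text = raw_chunk.strip('"')  # what _netwrck_extractor does for str input
print(text)                  # -> Hello from the model
# Any non-string chunk (e.g. a parsed JSON dict) maps to None and is skipped.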
@@ -116,51 +127,77 @@ class Netwrck(Provider):

         def for_stream():
             try:
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     "https://netwrck.com/api/chatpred_or",
                     json=payload,
-                    headers
-                    proxies
+                    # headers are set on the session
+                    # proxies are set on the session
                     timeout=self.timeout,
                     stream=True,
+                    impersonate="chrome110" # Use a common impersonation profile
                 )
-                response.raise_for_status()
+                response.raise_for_status() # Check for HTTP errors

                 streaming_text = ""
-
-
-
-
-
-
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No prefix
+                    to_json=False, # It's text
+                    content_extractor=self._netwrck_extractor, # Use the quote stripper
+                    yield_raw_on_error=True
+                )
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        yield {"text": content_chunk} if not raw else content_chunk
+                # Update history after stream finishes
+                self.last_response = {"text": streaming_text} # Store aggregated text
                 self.conversation.update_chat_history(payload["query"], streaming_text)

-            except
-                raise exceptions.ProviderConnectionError(f"Network error: {str(e)}") from e
-            except Exception as e:
-
+            except CurlError as e: # Catch CurlError
+                raise exceptions.ProviderConnectionError(f"Network error (CurlError): {str(e)}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.ProviderConnectionError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e

         def for_non_stream():
             try:
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     "https://netwrck.com/api/chatpred_or",
                     json=payload,
-                    headers
-                    proxies
+                    # headers are set on the session
+                    # proxies are set on the session
                     timeout=self.timeout,
+                    impersonate="chrome110" # Use a common impersonation profile
                 )
-                response.raise_for_status()
+                response.raise_for_status() # Check for HTTP errors

-
-
+                response_text_raw = response.text # Get raw text
+
+                # Process the text using sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response_text_raw,
+                    intro_value=None,
+                    to_json=False,
+                    content_extractor=self._netwrck_extractor
+                )
+                # Aggregate the single result
+                text = "".join(list(processed_stream))
+
+                self.last_response = {"text": text} # Store processed text
                 self.conversation.update_chat_history(prompt, text)

-
+                # Return dict or raw string
+                return text if raw else self.last_response

-            except
-                raise exceptions.FailedToGenerateResponseError(f"Network error: {str(e)}") from e
-            except Exception as e:
-
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Network error (CurlError): {str(e)}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e

         return for_stream() if stream else for_non_stream()
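Taken together, the reworked `ask()` keeps its old contract: non-streaming calls return a `{"text": ...}` dict, and streaming calls yield one such dict per sanitized chunk. A usage sketch (the prompt is illustrative, and the direct module import path is assumed from the file's location in this package):

from webscout.Provider.Netwrck import Netwrck

ai = Netwrck(model="anthropic/claude-3-7-sonnet-20250219", timeout=30)

# Non-streaming: ask() returns {"text": "..."} (or the raw string with raw=True)
print(ai.ask("Say hello", stream=False)["text"])

# Streaming: each sanitized chunk arrives as {"text": chunk}
for part in ai.ask("Tell me a short joke", stream=True):
    print(part["text"], end="", flush=True)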
@@ -172,26 +209,30 @@ class Netwrck(Provider):
         conversationally: bool = False,
     ) -> str:
         """Generates a response from the Netwrck API."""
-        def
-
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
                 prompt,
                 stream=True,
+                raw=False, # Ensure ask yields dicts for get_message
                 optimizer=optimizer,
                 conversationally=conversationally
-            )
-
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict

-        def
-
-
-
-
-
-
-
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False, # Ensure ask returns dict for get_message
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            return self.get_message(response_data) # get_message expects dict

-        return
+        return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: Dict[str, Any]) -> str:
         """Retrieves message only from response"""
@@ -199,6 +240,7 @@ class Netwrck(Provider):
         return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')

 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
webscout/Provider/OPENAI/__init__.py
CHANGED

@@ -22,4 +22,7 @@ from .uncovrAI import *
 from .opkfc import *
 from .chatgpt import *
 from .textpollinations import *
-from .e2b import *
+from .e2b import *
+from .multichat import * # Add MultiChatAI
+from .ai4chat import * # Add AI4Chat
+from .mcpcore import *
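With these star imports in place, the new clients resolve directly from the OPENAI package root; a sketch, with class names assumed from the "# Add ..." comments in the hunk above:

# Assumed exported names, per the comments in the hunk above
from webscout.Provider.OPENAI import AI4Chat, MultiChatAI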
webscout/Provider/OPENAI/ai4chat.py
ADDED

@@ -0,0 +1,286 @@
+import time
+import uuid
+import urllib.parse
+from curl_cffi.requests import Session, RequestsError
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+
+# --- AI4Chat Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'AI4Chat'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Use the format_prompt utility to format the conversation
+        from .utils import format_prompt
+
+        # Format the messages into a single string
+        conversation_prompt = format_prompt(messages, add_special_tokens=True, include_system=True)
+
+        # Set up request parameters
+        country_param = kwargs.get("country", self._client.country)
+        user_id_param = kwargs.get("user_id", self._client.user_id)
+
+        # Generate request ID and timestamp
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        # AI4Chat doesn't support streaming, so we'll simulate it if requested
+        if stream:
+            return self._create_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param)
+        else:
+            return self._create_non_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str,
+        conversation_prompt: str, country: str, user_id: str
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        """Simulate streaming by breaking up the full response."""
+        try:
+            # Get the full response first
+            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
+
+            # Break it into chunks for simulated streaming
+            words = full_response.split()
+            chunk_size = max(1, len(words) // 10) # Divide into ~10 chunks
+
+            # Track token usage
+            prompt_tokens = len(conversation_prompt.split())
+            completion_tokens = 0
+
+            # Stream chunks
+            for i in range(0, len(words), chunk_size):
+                chunk_text = " ".join(words[i:i+chunk_size])
+                completion_tokens += len(chunk_text.split())
+
+                # Create the delta object
+                delta = ChoiceDelta(
+                    content=chunk_text,
+                    role="assistant",
+                    tool_calls=None
+                )
+
+                # Create the choice object
+                choice = Choice(
+                    index=0,
+                    delta=delta,
+                    finish_reason=None,
+                    logprobs=None
+                )
+
+                # Create the chunk object
+                chunk = ChatCompletionChunk(
+                    id=request_id,
+                    choices=[choice],
+                    created=created_time,
+                    model=model,
+                    system_fingerprint=None
+                )
+
+                yield chunk
+
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(
+                content=None,
+                role=None,
+                tool_calls=None
+            )
+
+            choice = Choice(
+                index=0,
+                delta=delta,
+                finish_reason="stop",
+                logprobs=None
+            )
+
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+
+            yield chunk
+
+        except RequestsError as e:
+            print(f"Error during AI4Chat stream request: {e}")
+            raise IOError(f"AI4Chat request failed: {e}") from e
+        except Exception as e:
+            print(f"Unexpected error during AI4Chat stream request: {e}")
+            raise IOError(f"AI4Chat request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str,
+        conversation_prompt: str, country: str, user_id: str
+    ) -> ChatCompletion:
+        """Get a complete response from AI4Chat."""
+        try:
+            # Get the full response
+            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
+
+            # Estimate token counts
+            prompt_tokens = len(conversation_prompt.split())
+            completion_tokens = len(full_response.split())
+            total_tokens = prompt_tokens + completion_tokens
+
+            # Create the message object
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_response
+            )
+
+            # Create the choice object
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+
+            # Create the usage object
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+
+            return completion
+
+        except RequestsError as e:
+            print(f"Error during AI4Chat non-stream request: {e}")
+            raise IOError(f"AI4Chat request failed: {e}") from e
+        except Exception as e:
+            print(f"Unexpected error during AI4Chat non-stream request: {e}")
+            raise IOError(f"AI4Chat request failed: {e}") from e
+
+    def _get_ai4chat_response(self, prompt: str, country: str, user_id: str) -> str:
+        """Make the actual API request to AI4Chat."""
+        # URL encode parameters
+        encoded_text = urllib.parse.quote(prompt)
+        encoded_country = urllib.parse.quote(country)
+        encoded_user_id = urllib.parse.quote(user_id)
+
+        # Construct the API URL
+        url = f"{self._client.api_endpoint}?text={encoded_text}&country={encoded_country}&user_id={encoded_user_id}"
+
+        # Make the request
+        try:
+            response = self._client.session.get(url, headers=self._client.headers, timeout=self._client.timeout)
+            response.raise_for_status()
+        except RequestsError as e:
+            raise IOError(f"Failed to generate response: {e}")
+
+        # Process the response text
+        response_text = response.text
+
+        # Remove surrounding quotes if present
+        if response_text.startswith('"'):
+            response_text = response_text[1:]
+        if response_text.endswith('"'):
+            response_text = response_text[:-1]
+
+        # Replace escaped newlines
+        response_text = response_text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+        return response_text
+
+class Chat(BaseChat):
+    def __init__(self, client: 'AI4Chat'):
+        self.completions = Completions(client)
+
+class AI4Chat(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for AI4Chat API.
+
+    Usage:
+        client = AI4Chat()
+        response = client.chat.completions.create(
+            model="default",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+        print(response.choices[0].message.content)
+    """
+
+    AVAILABLE_MODELS = ["default"]
+
+    def __init__(
+        self,
+        timeout: int = 30,
+        proxies: dict = {},
+        system_prompt: str = "You are a helpful and informative AI assistant.",
+        country: str = "Asia",
+        user_id: str = "usersmjb2oaz7y"
+    ):
+        """
+        Initialize the AI4Chat client.
+
+        Args:
+            timeout: Request timeout in seconds
+            proxies: Optional proxy configuration
+            system_prompt: System prompt to guide the AI's behavior
+            country: Country parameter for API
+            user_id: User ID for API
+        """
+        self.timeout = timeout
+        self.proxies = proxies
+        self.system_prompt = system_prompt
+        self.country = country
+        self.user_id = user_id
+
+        # API endpoint
+        self.api_endpoint = "https://yw85opafq6.execute-api.us-east-1.amazonaws.com/default/boss_mode_15aug"
+
+        # Initialize session
+        self.session = Session(timeout=timeout, proxies=proxies)
+
+        # Set headers
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Language": "id-ID,id;q=0.9",
+            "Origin": "https://www.ai4chat.co",
+            "Priority": "u=1, i",
+            "Referer": "https://www.ai4chat.co/",
+            "Sec-CH-UA": '"Chromium";v="131", "Not_A Brand";v="24", "Microsoft Edge Simulate";v="131", "Lemur";v="131"',
+            "Sec-CH-UA-Mobile": "?1",
+            "Sec-CH-UA-Platform": '"Android"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "cross-site",
+            "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Mobile Safari/537.36"
+        }
+
+        # Update session headers
+        self.session.headers.update(self.headers)
+
+        # Initialize chat interface
+        self.chat = Chat(self)
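Because the AI4Chat endpoint only returns complete responses, create(stream=True) fakes streaming: it slices the finished text into roughly ten word-level chunks and ends with a finish_reason="stop" chunk whose delta.content is None. A consumer handles it like a real OpenAI stream; a sketch with an illustrative prompt, assuming the chunk objects mirror OpenAI's shapes as the utils imports above suggest:

from webscout.Provider.OPENAI.ai4chat import AI4Chat  # module path per this diff

client = AI4Chat(timeout=30)

# ~10 simulated chunks, then a final chunk whose delta.content is None
for chunk in client.chat.completions.create(
    model="default",
    messages=[{"role": "user", "content": "Explain HTTP in one paragraph."}],
    stream=True,
):
    delta = chunk.choices[0].delta
    if delta.content:
        print(delta.content, end=" ", flush=True)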
webscout/Provider/OPENAI/chatgptclone.py
CHANGED

@@ -1,6 +1,7 @@
 import time
 import uuid
-import cloudscraper
+# import cloudscraper
+from curl_cffi.requests import Session, RequestsError
 import json
 import re
 from typing import List, Dict, Optional, Union, Generator, Any
@@ -123,12 +124,15 @@ class Completions(BaseCompletions):
         for msg in payload.get("messages", []):
             prompt_tokens += len(msg.get("content", "").split())

-
+        buffer = ""
+        for line in response.iter_content():
             if line:
-
+                if isinstance(line, bytes):
+                    line = line.decode("utf-8", errors="replace")
+                buffer += line

                 # ChatGPTClone uses a different format, so we need to extract the content
-                match = re.search(r'0:"(.*?)"',
+                match = re.search(r'0:"(.*?)"', buffer)
                 if match:
                     content = match.group(1)

@@ -179,6 +183,12 @@ class Completions(BaseCompletions):
                     # Return the chunk object for internal processing
                     yield chunk

+                    # Clear buffer after processing
+                    buffer = ""
+                # If buffer gets too long, reset it to avoid memory issues
+                elif len(buffer) > 1024:
+                    buffer = ""
+
             # Final chunk with finish_reason="stop"
             delta = ChoiceDelta(
                 content=None,
@@ -254,12 +264,20 @@ class Completions(BaseCompletions):

             # Collect the full response
             full_text = ""
-
+            buffer = ""
+            for line in response.iter_content():
                 if line:
-
+                    if isinstance(line, bytes):
+                        line = line.decode("utf-8", errors="replace")
+                    buffer += line
+                    match = re.search(r'0:"(.*?)"', buffer)
                     if match:
                         content = match.group(1)
                         full_text += content
+                        buffer = ""
+                    # If buffer gets too long, reset it to avoid memory issues
+                    elif len(buffer) > 1024:
+                        buffer = ""

             # Format the text (replace escaped newlines)
             full_text = self._client.format_text(full_text)
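The 0:"(.*?)" regex targets the wire format this endpoint emits, where each text part arrives as 0:"...", resembling the Vercel AI SDK data-stream protocol. A self-contained sketch of the buffer-and-reset loop above, with illustrative sample fragments:

import re

# Illustrative raw fragments in the 0:"..." wire format
fragments = ['0:"Hello"', '0:" world"']

buffer, full_text = "", ""
for piece in fragments:
    buffer += piece
    match = re.search(r'0:"(.*?)"', buffer)
    if match:
        full_text += match.group(1)
        buffer = ""            # clear after a successful extraction
    elif len(buffer) > 1024:   # safety valve against unbounded growth
        buffer = ""

print(full_text)  # -> Hello world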
@@ -329,23 +347,25 @@ class ChatGPTClone(OpenAICompatibleProvider):
     def __init__(
         self,
         timeout: Optional[int] = None,
-        browser: str = "chrome"
+        browser: str = "chrome",
+        impersonate: str = "chrome120"
     ):
         """
         Initialize the ChatGPTClone client.

         Args:
             timeout: Request timeout in seconds (None for no timeout)
-            browser: Browser to emulate in user agent
+            browser: Browser to emulate in user agent (for LitAgent fallback)
+            impersonate: Browser impersonation for curl_cffi (default: chrome120)
         """
         self.timeout = timeout
         self.temperature = 0.6 # Default temperature
         self.top_p = 0.7 # Default top_p

-        # Use
-        self.session =
+        # Use curl_cffi for Cloudflare bypass and browser impersonation
+        self.session = Session(impersonate=impersonate, timeout=timeout)

-        #
+        # Use LitAgent for fingerprint if available, else fallback
         agent = LitAgent()
         self.fingerprint = agent.generate_fingerprint(browser)

@@ -374,11 +394,12 @@ class ChatGPTClone(OpenAICompatibleProvider):
         # Initialize the chat interface
         self.chat = Chat(self)

-    def refresh_identity(self, browser: str = None):
-        """Refreshes the browser identity fingerprint."""
+    def refresh_identity(self, browser: str = None, impersonate: str = None):
+        """Refreshes the browser identity fingerprint and curl_cffi session."""
         browser = browser or self.fingerprint.get("browser_type", "chrome")
+        impersonate = impersonate or "chrome120"
         self.fingerprint = LitAgent().generate_fingerprint(browser)
-
+        self.session = Session(impersonate=impersonate, timeout=self.timeout)
         # Update headers with new fingerprint
         self.headers.update({
             "Accept": self.fingerprint["accept"],
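The common thread across this release is the move from requests/cloudscraper to curl_cffi, whose impersonate= option presents a real browser's TLS fingerprint rather than Python's, which is what lets these providers pass Cloudflare-style checks. A minimal sketch (example URL only):

from curl_cffi.requests import Session

# Presents Chrome 120's TLS/JA3 fingerprint instead of python-requests'
session = Session(impersonate="chrome120", timeout=30)
resp = session.get("https://example.com")
print(resp.status_code)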