webscout-8.2.3-py3-none-any.whl → webscout-8.2.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as possibly problematic (see the registry advisory for details).
- inferno/lol.py +589 -0
- webscout/AIutel.py +226 -14
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/Extra/tempmail/base.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +4 -0
- webscout/Provider/AllenAI.py +163 -126
- webscout/Provider/ChatGPTClone.py +96 -84
- webscout/Provider/Deepinfra.py +95 -67
- webscout/Provider/ElectronHub.py +55 -0
- webscout/Provider/GPTWeb.py +96 -46
- webscout/Provider/Groq.py +194 -91
- webscout/Provider/HeckAI.py +89 -47
- webscout/Provider/HuggingFaceChat.py +113 -106
- webscout/Provider/Hunyuan.py +94 -83
- webscout/Provider/Jadve.py +107 -75
- webscout/Provider/LambdaChat.py +106 -64
- webscout/Provider/Llama3.py +94 -39
- webscout/Provider/MCPCore.py +318 -0
- webscout/Provider/Marcus.py +85 -36
- webscout/Provider/Netwrck.py +76 -43
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +286 -0
- webscout/Provider/OPENAI/chatgptclone.py +35 -14
- webscout/Provider/OPENAI/deepinfra.py +37 -0
- webscout/Provider/OPENAI/groq.py +354 -0
- webscout/Provider/OPENAI/heckai.py +6 -2
- webscout/Provider/OPENAI/mcpcore.py +376 -0
- webscout/Provider/OPENAI/multichat.py +368 -0
- webscout/Provider/OPENAI/netwrck.py +3 -1
- webscout/Provider/OpenGPT.py +48 -38
- webscout/Provider/PI.py +168 -92
- webscout/Provider/PizzaGPT.py +66 -36
- webscout/Provider/TeachAnything.py +85 -51
- webscout/Provider/TextPollinationsAI.py +109 -51
- webscout/Provider/TwoAI.py +109 -60
- webscout/Provider/Venice.py +93 -56
- webscout/Provider/VercelAI.py +2 -2
- webscout/Provider/WiseCat.py +65 -28
- webscout/Provider/Writecream.py +37 -11
- webscout/Provider/WritingMate.py +135 -63
- webscout/Provider/__init__.py +3 -21
- webscout/Provider/ai4chat.py +6 -7
- webscout/Provider/copilot.py +0 -3
- webscout/Provider/elmo.py +101 -58
- webscout/Provider/granite.py +91 -46
- webscout/Provider/hermes.py +87 -47
- webscout/Provider/koala.py +1 -1
- webscout/Provider/learnfastai.py +104 -50
- webscout/Provider/llama3mitril.py +86 -51
- webscout/Provider/llmchat.py +88 -46
- webscout/Provider/llmchatco.py +74 -49
- webscout/Provider/meta.py +41 -37
- webscout/Provider/multichat.py +54 -25
- webscout/Provider/scnet.py +93 -43
- webscout/Provider/searchchat.py +82 -75
- webscout/Provider/sonus.py +103 -51
- webscout/Provider/toolbaz.py +132 -77
- webscout/Provider/turboseek.py +92 -41
- webscout/Provider/tutorai.py +82 -64
- webscout/Provider/typefully.py +75 -33
- webscout/Provider/typegpt.py +96 -35
- webscout/Provider/uncovr.py +112 -62
- webscout/Provider/x0gpt.py +69 -26
- webscout/Provider/yep.py +79 -66
- webscout/conversation.py +35 -21
- webscout/exceptions.py +20 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/METADATA +22 -10
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/RECORD +78 -81
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/WHEEL +1 -1
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llamatutor.py +0 -192
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info/licenses}/LICENSE.md +0 -0
- {webscout-8.2.3.dist-info → webscout-8.2.4.dist-info}/top_level.txt +0 -0
webscout/Provider/Netwrck.py
CHANGED
@@ -1,6 +1,5 @@
 import time
 import uuid
-import requests
 import json
 from typing import Any, Dict, Optional, Generator, Union
 from dataclasses import dataclass, asdict
@@ -9,6 +8,9 @@ from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
 from webscout.AIbase import Provider
 from webscout import exceptions
 from webscout.litagent import LitAgent
+# Replace requests with curl_cffi
+from curl_cffi.requests import Session  # Import Session
+from curl_cffi import CurlError  # Import CurlError

 class Netwrck(Provider):
     """
@@ -21,9 +23,10 @@ class Netwrck(Provider):
         "x-ai/grok-2",
         "anthropic/claude-3-7-sonnet-20250219",
         "sao10k/l3-euryale-70b",
-        "openai/gpt-
+        "openai/gpt-4.1-mini",
         "gryphe/mythomax-l2-13b",
         "google/gemini-pro-1.5",
+        "google/gemini-2.5-flash-preview-04-17",
         "nvidia/llama-3.1-nemotron-70b-instruct",
         "deepseek/deepseek-r1",
         "deepseek/deepseek-chat"
@@ -34,7 +37,7 @@
         self,
         model: str = "anthropic/claude-3-7-sonnet-20250219",
         is_conversation: bool = True,
-        max_tokens: int = 4096,
+        max_tokens: int = 4096,  # Note: max_tokens is not used by this API
         timeout: int = 30,
         intro: Optional[str] = None,
         filepath: Optional[str] = None,
@@ -43,17 +46,18 @@
         history_offset: int = 0,
         act: Optional[str] = None,
         system_prompt: str = "You are a helpful assistant.",
-        temperature: float = 0.7,
-        top_p: float = 0.8
+        temperature: float = 0.7,  # Note: temperature is not used by this API
+        top_p: float = 0.8  # Note: top_p is not used by this API
     ):
         """Initializes the Netwrck API client."""
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

         self.model = model
-        self.model_name = model
+        self.model_name = model
         self.system_prompt = system_prompt
-
+        # Initialize curl_cffi Session
+        self.session = Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
         self.timeout = timeout
@@ -61,7 +65,7 @@
         self.temperature = temperature
         self.top_p = top_p

-        self.agent = LitAgent()
+        self.agent = LitAgent()  # Keep for potential future use or other headers
         self.headers = {
             'authority': 'netwrck.com',
             'accept': '*/*',
@@ -69,11 +73,14 @@
             'content-type': 'application/json',
             'origin': 'https://netwrck.com',
             'referer': 'https://netwrck.com/',
-            'user-agent': self.agent.random()
+            'user-agent': self.agent.random()
+            # Add sec-ch-ua headers if needed for impersonation consistency
         }

+        # Update curl_cffi session headers and proxies
         self.session.headers.update(self.headers)
         self.proxies = proxies or {}
+        self.session.proxies = self.proxies  # Assign proxies directly

         Conversation.intro = (
             AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
@@ -92,7 +99,7 @@
         self,
         prompt: str,
         stream: bool = False,
-        raw: bool = False,
+        raw: bool = False,  # Keep raw param for interface consistency
         optimizer: Optional[str] = None,
         conversationally: bool = False,
     ) -> Union[Dict[str, Any], Generator]:
@@ -116,51 +123,72 @@

         def for_stream():
             try:
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     "https://netwrck.com/api/chatpred_or",
                     json=payload,
-                    headers
-                    proxies
+                    # headers are set on the session
+                    # proxies are set on the session
                     timeout=self.timeout,
                     stream=True,
+                    impersonate="chrome110"  # Use a common impersonation profile
                 )
-                response.raise_for_status()
+                response.raise_for_status()  # Check for HTTP errors

                 streaming_text = ""
-
-
-
-
-
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
+                        try:
+                            decoded_line = line_bytes.decode('utf-8').strip('"')
+                            # Handle potential escape sequences if necessary
+                            # decoded_line = decoded_line.encode().decode('unicode_escape')  # Uncomment if needed
+                            streaming_text += decoded_line
+                            resp = {"text": decoded_line}
+                            # Yield dict or raw string
+                            yield resp if not raw else decoded_line
+                        except UnicodeDecodeError:
+                            # Handle potential decoding errors if chunks split mid-character
+                            continue

+                # Update history after stream finishes
+                self.last_response = {"text": streaming_text}  # Store aggregated text
                 self.conversation.update_chat_history(payload["query"], streaming_text)

-            except
-                raise exceptions.ProviderConnectionError(f"Network error: {str(e)}") from e
-            except Exception as e:
-
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.ProviderConnectionError(f"Network error (CurlError): {str(e)}") from e
+            except Exception as e:  # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.ProviderConnectionError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e

         def for_non_stream():
             try:
+                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     "https://netwrck.com/api/chatpred_or",
                     json=payload,
-                    headers
-                    proxies
+                    # headers are set on the session
+                    # proxies are set on the session
                     timeout=self.timeout,
+                    impersonate="chrome110"  # Use a common impersonation profile
                 )
-                response.raise_for_status()
+                response.raise_for_status()  # Check for HTTP errors

+                # Use response.text which is already decoded
                 text = response.text.strip('"')
+                # Handle potential escape sequences if necessary
+                # text = text.encode().decode('unicode_escape')  # Uncomment if needed
                 self.last_response = {"text": text}
                 self.conversation.update_chat_history(prompt, text)

-
+                # Return dict or raw string
+                return text if raw else self.last_response

-            except
-                raise exceptions.FailedToGenerateResponseError(f"Network error: {str(e)}") from e
-            except Exception as e:
-
+            except CurlError as e:  # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Network error (CurlError): {str(e)}") from e
+            except Exception as e:  # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e

         return for_stream() if stream else for_non_stream()
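The new streaming path simply decodes each line and strips the surrounding JSON-style quotes, so its behavior is easy to check in isolation. A sketch with fabricated byte frames (the real API's framing may differ):

# Fabricated frames imitating what the loop above expects from iter_lines().
frames = [b'"Hello, "', b'"world!"', b'']

streaming_text = ""
for line_bytes in frames:
    if line_bytes:  # skip empty keep-alive lines
        try:
            decoded = line_bytes.decode("utf-8").strip('"')
            streaming_text += decoded
        except UnicodeDecodeError:
            continue  # chunk split mid-character; drop it, as the provider does
print(streaming_text)  # -> Hello, world!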
@@ -172,26 +200,30 @@
         conversationally: bool = False,
     ) -> str:
         """Generates a response from the Netwrck API."""
-        def
-
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
                 prompt,
                 stream=True,
+                raw=False,  # Ensure ask yields dicts for get_message
                 optimizer=optimizer,
                 conversationally=conversationally
-            )
-
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict)  # get_message expects dict

-        def
-
-
-
-
-
-
-
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt,
+                stream=False,
+                raw=False,  # Ensure ask returns dict for get_message
+                optimizer=optimizer,
+                conversationally=conversationally,
             )
+            return self.get_message(response_data)  # get_message expects dict

-        return
+        return for_stream_chat() if stream else for_non_stream_chat()

     def get_message(self, response: Dict[str, Any]) -> str:
         """Retrieves message only from response"""
@@ -199,6 +231,7 @@ class Netwrck(Provider):
         return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')

 if __name__ == "__main__":
+    # Ensure curl_cffi is installed
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
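The change repeated across most providers in this release is visible here in full: drop the requests import, route traffic through a curl_cffi Session with browser impersonation, and split error handling into CurlError versus everything else. A minimal standalone sketch of that pattern; the endpoint and payload below are illustrative placeholders, not the real Netwrck contract:

# Sketch of the requests -> curl_cffi migration pattern applied in this release.
# URL and payload are placeholders for illustration only.
from curl_cffi.requests import Session
from curl_cffi import CurlError

session = Session()
session.headers.update({"content-type": "application/json"})

def post_impersonated(url: str, payload: dict, timeout: int = 30) -> str:
    try:
        # impersonate gives the request a Chrome-like TLS/HTTP fingerprint,
        # which is the point of dropping plain requests here
        response = session.post(url, json=payload, timeout=timeout, impersonate="chrome110")
        response.raise_for_status()
        return response.text
    except CurlError as e:
        # network-level failures surface as CurlError rather than requests exceptions
        raise ConnectionError(f"Network error (CurlError): {e}") from e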
webscout/Provider/OPENAI/__init__.py
CHANGED
@@ -22,4 +22,7 @@ from .uncovrAI import *
 from .opkfc import *
 from .chatgpt import *
 from .textpollinations import *
-from .e2b import *
+from .e2b import *
+from .multichat import *  # Add MultiChatAI
+from .ai4chat import *  # Add AI4Chat
+from .mcpcore import *
webscout/Provider/OPENAI/ai4chat.py
ADDED
@@ -0,0 +1,286 @@
+import time
+import uuid
+import urllib.parse
+from curl_cffi.requests import Session, RequestsError
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+
+# --- AI4Chat Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'AI4Chat'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        # Use the format_prompt utility to format the conversation
+        from .utils import format_prompt
+
+        # Format the messages into a single string
+        conversation_prompt = format_prompt(messages, add_special_tokens=True, include_system=True)
+
+        # Set up request parameters
+        country_param = kwargs.get("country", self._client.country)
+        user_id_param = kwargs.get("user_id", self._client.user_id)
+
+        # Generate request ID and timestamp
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        # AI4Chat doesn't support streaming, so we'll simulate it if requested
+        if stream:
+            return self._create_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param)
+        else:
+            return self._create_non_stream(request_id, created_time, model, conversation_prompt, country_param, user_id_param)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str,
+        conversation_prompt: str, country: str, user_id: str
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        """Simulate streaming by breaking up the full response."""
+        try:
+            # Get the full response first
+            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
+
+            # Break it into chunks for simulated streaming
+            words = full_response.split()
+            chunk_size = max(1, len(words) // 10)  # Divide into ~10 chunks
+
+            # Track token usage
+            prompt_tokens = len(conversation_prompt.split())
+            completion_tokens = 0
+
+            # Stream chunks
+            for i in range(0, len(words), chunk_size):
+                chunk_text = " ".join(words[i:i+chunk_size])
+                completion_tokens += len(chunk_text.split())
+
+                # Create the delta object
+                delta = ChoiceDelta(
+                    content=chunk_text,
+                    role="assistant",
+                    tool_calls=None
+                )
+
+                # Create the choice object
+                choice = Choice(
+                    index=0,
+                    delta=delta,
+                    finish_reason=None,
+                    logprobs=None
+                )
+
+                # Create the chunk object
+                chunk = ChatCompletionChunk(
+                    id=request_id,
+                    choices=[choice],
+                    created=created_time,
+                    model=model,
+                    system_fingerprint=None
+                )
+
+                yield chunk
+
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(
+                content=None,
+                role=None,
+                tool_calls=None
+            )
+
+            choice = Choice(
+                index=0,
+                delta=delta,
+                finish_reason="stop",
+                logprobs=None
+            )
+
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+
+            yield chunk
+
+        except RequestsError as e:
+            print(f"Error during AI4Chat stream request: {e}")
+            raise IOError(f"AI4Chat request failed: {e}") from e
+        except Exception as e:
+            print(f"Unexpected error during AI4Chat stream request: {e}")
+            raise IOError(f"AI4Chat request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str,
+        conversation_prompt: str, country: str, user_id: str
+    ) -> ChatCompletion:
+        """Get a complete response from AI4Chat."""
+        try:
+            # Get the full response
+            full_response = self._get_ai4chat_response(conversation_prompt, country, user_id)
+
+            # Estimate token counts
+            prompt_tokens = len(conversation_prompt.split())
+            completion_tokens = len(full_response.split())
+            total_tokens = prompt_tokens + completion_tokens
+
+            # Create the message object
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_response
+            )
+
+            # Create the choice object
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason="stop"
+            )
+
+            # Create the usage object
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens
+            )
+
+            # Create the completion object
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+
+            return completion
+
+        except RequestsError as e:
+            print(f"Error during AI4Chat non-stream request: {e}")
+            raise IOError(f"AI4Chat request failed: {e}") from e
+        except Exception as e:
+            print(f"Unexpected error during AI4Chat non-stream request: {e}")
+            raise IOError(f"AI4Chat request failed: {e}") from e
+
+    def _get_ai4chat_response(self, prompt: str, country: str, user_id: str) -> str:
+        """Make the actual API request to AI4Chat."""
+        # URL encode parameters
+        encoded_text = urllib.parse.quote(prompt)
+        encoded_country = urllib.parse.quote(country)
+        encoded_user_id = urllib.parse.quote(user_id)
+
+        # Construct the API URL
+        url = f"{self._client.api_endpoint}?text={encoded_text}&country={encoded_country}&user_id={encoded_user_id}"
+
+        # Make the request
+        try:
+            response = self._client.session.get(url, headers=self._client.headers, timeout=self._client.timeout)
+            response.raise_for_status()
+        except RequestsError as e:
+            raise IOError(f"Failed to generate response: {e}")
+
+        # Process the response text
+        response_text = response.text
+
+        # Remove surrounding quotes if present
+        if response_text.startswith('"'):
+            response_text = response_text[1:]
+        if response_text.endswith('"'):
+            response_text = response_text[:-1]
+
+        # Replace escaped newlines
+        response_text = response_text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+        return response_text
+
+class Chat(BaseChat):
+    def __init__(self, client: 'AI4Chat'):
+        self.completions = Completions(client)
+
+class AI4Chat(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for AI4Chat API.
+
+    Usage:
+        client = AI4Chat()
+        response = client.chat.completions.create(
+            model="default",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+        print(response.choices[0].message.content)
+    """
+
+    AVAILABLE_MODELS = ["default"]
+
+    def __init__(
+        self,
+        timeout: int = 30,
+        proxies: dict = {},
+        system_prompt: str = "You are a helpful and informative AI assistant.",
+        country: str = "Asia",
+        user_id: str = "usersmjb2oaz7y"
+    ):
+        """
+        Initialize the AI4Chat client.
+
+        Args:
+            timeout: Request timeout in seconds
+            proxies: Optional proxy configuration
+            system_prompt: System prompt to guide the AI's behavior
+            country: Country parameter for API
+            user_id: User ID for API
+        """
+        self.timeout = timeout
+        self.proxies = proxies
+        self.system_prompt = system_prompt
+        self.country = country
+        self.user_id = user_id
+
+        # API endpoint
+        self.api_endpoint = "https://yw85opafq6.execute-api.us-east-1.amazonaws.com/default/boss_mode_15aug"
+
+        # Initialize session
+        self.session = Session(timeout=timeout, proxies=proxies)
+
+        # Set headers
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Language": "id-ID,id;q=0.9",
+            "Origin": "https://www.ai4chat.co",
+            "Priority": "u=1, i",
+            "Referer": "https://www.ai4chat.co/",
+            "Sec-CH-UA": '"Chromium";v="131", "Not_A Brand";v="24", "Microsoft Edge Simulate";v="131", "Lemur";v="131"',
+            "Sec-CH-UA-Mobile": "?1",
+            "Sec-CH-UA-Platform": '"Android"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "cross-site",
+            "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Mobile Safari/537.36"
+        }
+
+        # Update session headers
+        self.session.headers.update(self.headers)
+
+        # Initialize chat interface
+        self.chat = Chat(self)
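Because AI4Chat only simulates streaming (the full reply is fetched once, then re-chunked into roughly ten word groups), consuming it looks like any OpenAI-style stream. A short usage sketch based on the class above, assuming the wildcard re-export in the __init__.py hunk makes AI4Chat importable from webscout.Provider.OPENAI:

from webscout.Provider.OPENAI import AI4Chat  # assumes the re-export shown above

client = AI4Chat(timeout=30, country="Asia")  # country/user_id end up as query parameters
stream = client.chat.completions.create(
    model="default",  # the only entry in AVAILABLE_MODELS
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,  # simulated: chunks are word groups of the already-complete reply
)
for chunk in stream:
    delta = chunk.choices[0].delta
    if delta.content:  # the final chunk carries content=None and finish_reason="stop"
        print(delta.content, end=" ")  # chunks are space-joined word groups
print()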
webscout/Provider/OPENAI/chatgptclone.py
CHANGED
@@ -1,6 +1,7 @@
 import time
 import uuid
-import cloudscraper
+# import cloudscraper
+from curl_cffi.requests import Session, RequestsError
 import json
 import re
 from typing import List, Dict, Optional, Union, Generator, Any
@@ -123,12 +124,15 @@ class Completions(BaseCompletions):
             for msg in payload.get("messages", []):
                 prompt_tokens += len(msg.get("content", "").split())

-
+            buffer = ""
+            for line in response.iter_content():
                 if line:
-
+                    if isinstance(line, bytes):
+                        line = line.decode("utf-8", errors="replace")
+                    buffer += line

                     # ChatGPTClone uses a different format, so we need to extract the content
-                    match = re.search(r'0:"(.*?)"',
+                    match = re.search(r'0:"(.*?)"', buffer)
                     if match:
                         content = match.group(1)

@@ -179,6 +183,12 @@
                         # Return the chunk object for internal processing
                         yield chunk

+                        # Clear buffer after processing
+                        buffer = ""
+                    # If buffer gets too long, reset it to avoid memory issues
+                    elif len(buffer) > 1024:
+                        buffer = ""
+
             # Final chunk with finish_reason="stop"
             delta = ChoiceDelta(
                 content=None,
@@ -254,12 +264,20 @@

             # Collect the full response
             full_text = ""
-
+            buffer = ""
+            for line in response.iter_content():
                 if line:
-
+                    if isinstance(line, bytes):
+                        line = line.decode("utf-8", errors="replace")
+                    buffer += line
+                    match = re.search(r'0:"(.*?)"', buffer)
                     if match:
                         content = match.group(1)
                         full_text += content
+                        buffer = ""
+                    # If buffer gets too long, reset it to avoid memory issues
+                    elif len(buffer) > 1024:
+                        buffer = ""

             # Format the text (replace escaped newlines)
             full_text = self._client.format_text(full_text)
@@ -329,23 +347,25 @@ class ChatGPTClone(OpenAICompatibleProvider):
     def __init__(
         self,
         timeout: Optional[int] = None,
-        browser: str = "chrome"
+        browser: str = "chrome",
+        impersonate: str = "chrome120"
     ):
         """
         Initialize the ChatGPTClone client.

         Args:
             timeout: Request timeout in seconds (None for no timeout)
-            browser: Browser to emulate in user agent
+            browser: Browser to emulate in user agent (for LitAgent fallback)
+            impersonate: Browser impersonation for curl_cffi (default: chrome120)
         """
         self.timeout = timeout
         self.temperature = 0.6  # Default temperature
         self.top_p = 0.7  # Default top_p

-        # Use
-        self.session =
+        # Use curl_cffi for Cloudflare bypass and browser impersonation
+        self.session = Session(impersonate=impersonate, timeout=timeout)

-        #
+        # Use LitAgent for fingerprint if available, else fallback
         agent = LitAgent()
         self.fingerprint = agent.generate_fingerprint(browser)

@@ -374,11 +394,12 @@
         # Initialize the chat interface
         self.chat = Chat(self)

-    def refresh_identity(self, browser: str = None):
-        """Refreshes the browser identity fingerprint."""
+    def refresh_identity(self, browser: str = None, impersonate: str = None):
+        """Refreshes the browser identity fingerprint and curl_cffi session."""
         browser = browser or self.fingerprint.get("browser_type", "chrome")
+        impersonate = impersonate or "chrome120"
         self.fingerprint = LitAgent().generate_fingerprint(browser)
-
+        self.session = Session(impersonate=impersonate, timeout=self.timeout)
         # Update headers with new fingerprint
         self.headers.update({
             "Accept": self.fingerprint["accept"],
|