webscout-8.3.1-py3-none-any.whl → webscout-8.3.3-py3-none-any.whl
This diff shows the content of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
Potentially problematic release: this version of webscout has been flagged as possibly problematic.
- webscout/AIutel.py +180 -78
- webscout/Bing_search.py +417 -0
- webscout/Extra/gguf.py +706 -177
- webscout/Provider/AISEARCH/__init__.py +1 -0
- webscout/Provider/AISEARCH/genspark_search.py +7 -7
- webscout/Provider/AISEARCH/stellar_search.py +132 -0
- webscout/Provider/ExaChat.py +84 -58
- webscout/Provider/GeminiProxy.py +140 -0
- webscout/Provider/HeckAI.py +85 -80
- webscout/Provider/Jadve.py +56 -50
- webscout/Provider/MCPCore.py +78 -75
- webscout/Provider/MiniMax.py +207 -0
- webscout/Provider/Nemotron.py +41 -13
- webscout/Provider/Netwrck.py +34 -51
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -4
- webscout/Provider/OPENAI/GeminiProxy.py +328 -0
- webscout/Provider/OPENAI/MiniMax.py +298 -0
- webscout/Provider/OPENAI/README.md +32 -29
- webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
- webscout/Provider/OPENAI/TogetherAI.py +4 -17
- webscout/Provider/OPENAI/__init__.py +17 -1
- webscout/Provider/OPENAI/autoproxy.py +1067 -39
- webscout/Provider/OPENAI/base.py +17 -76
- webscout/Provider/OPENAI/deepinfra.py +42 -108
- webscout/Provider/OPENAI/e2b.py +0 -1
- webscout/Provider/OPENAI/flowith.py +179 -166
- webscout/Provider/OPENAI/friendli.py +233 -0
- webscout/Provider/OPENAI/mcpcore.py +109 -70
- webscout/Provider/OPENAI/monochat.py +329 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/scirachat.py +59 -51
- webscout/Provider/OPENAI/toolbaz.py +3 -9
- webscout/Provider/OPENAI/typegpt.py +1 -1
- webscout/Provider/OPENAI/utils.py +19 -42
- webscout/Provider/OPENAI/x0gpt.py +14 -2
- webscout/Provider/OPENAI/xenai.py +514 -0
- webscout/Provider/OPENAI/yep.py +8 -2
- webscout/Provider/OpenGPT.py +54 -32
- webscout/Provider/PI.py +58 -84
- webscout/Provider/StandardInput.py +32 -13
- webscout/Provider/TTI/README.md +9 -9
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/aiarta.py +92 -78
- webscout/Provider/TTI/bing.py +231 -0
- webscout/Provider/TTI/infip.py +212 -0
- webscout/Provider/TTI/monochat.py +220 -0
- webscout/Provider/TTS/speechma.py +45 -39
- webscout/Provider/TeachAnything.py +11 -3
- webscout/Provider/TextPollinationsAI.py +78 -70
- webscout/Provider/TogetherAI.py +350 -0
- webscout/Provider/Venice.py +37 -46
- webscout/Provider/VercelAI.py +27 -24
- webscout/Provider/WiseCat.py +35 -35
- webscout/Provider/WrDoChat.py +22 -26
- webscout/Provider/WritingMate.py +26 -22
- webscout/Provider/XenAI.py +324 -0
- webscout/Provider/__init__.py +10 -5
- webscout/Provider/deepseek_assistant.py +378 -0
- webscout/Provider/granite.py +48 -57
- webscout/Provider/koala.py +51 -39
- webscout/Provider/learnfastai.py +49 -64
- webscout/Provider/llmchat.py +79 -93
- webscout/Provider/llmchatco.py +63 -78
- webscout/Provider/multichat.py +51 -40
- webscout/Provider/oivscode.py +1 -1
- webscout/Provider/scira_chat.py +159 -96
- webscout/Provider/scnet.py +13 -13
- webscout/Provider/searchchat.py +13 -13
- webscout/Provider/sonus.py +12 -11
- webscout/Provider/toolbaz.py +25 -8
- webscout/Provider/turboseek.py +41 -42
- webscout/Provider/typefully.py +27 -12
- webscout/Provider/typegpt.py +41 -46
- webscout/Provider/uncovr.py +55 -90
- webscout/Provider/x0gpt.py +33 -17
- webscout/Provider/yep.py +79 -96
- webscout/auth/__init__.py +55 -0
- webscout/auth/api_key_manager.py +189 -0
- webscout/auth/auth_system.py +100 -0
- webscout/auth/config.py +76 -0
- webscout/auth/database.py +400 -0
- webscout/auth/exceptions.py +67 -0
- webscout/auth/middleware.py +248 -0
- webscout/auth/models.py +130 -0
- webscout/auth/providers.py +279 -0
- webscout/auth/rate_limiter.py +254 -0
- webscout/auth/request_models.py +127 -0
- webscout/auth/request_processing.py +226 -0
- webscout/auth/routes.py +550 -0
- webscout/auth/schemas.py +103 -0
- webscout/auth/server.py +367 -0
- webscout/client.py +121 -70
- webscout/litagent/Readme.md +68 -55
- webscout/litagent/agent.py +99 -9
- webscout/scout/core/scout.py +104 -26
- webscout/scout/element.py +139 -18
- webscout/swiftcli/core/cli.py +14 -3
- webscout/swiftcli/decorators/output.py +59 -9
- webscout/update_checker.py +31 -49
- webscout/version.py +1 -1
- webscout/webscout_search.py +4 -12
- webscout/webscout_search_async.py +3 -10
- webscout/yep_search.py +2 -11
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/METADATA +141 -99
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/RECORD +109 -83
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +1 -1
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/OPENAI/api.py +0 -1320
- webscout/Provider/TTI/fastflux.py +0 -233
- webscout/Provider/Writecream.py +0 -246
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
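
For reference, a diff like the two file diffs below can be reproduced locally from the published wheels. The following is a minimal sketch using only the Python standard library; the wheel file names and the chosen member path are assumptions, and the wheels must be downloaded first (e.g. `pip download --no-deps webscout==8.3.1`, then the same for `8.3.3`):

```python
# Minimal sketch (not part of the package): diff one module between two wheels.
# Assumes both wheel files are in the current directory.
import difflib
import zipfile

OLD_WHEEL = "webscout-8.3.1-py3-none-any.whl"   # assumed file name
NEW_WHEEL = "webscout-8.3.3-py3-none-any.whl"   # assumed file name
MEMBER = "webscout/Provider/OPENAI/flowith.py"  # any file from the list above

def read_member(wheel_path: str, member: str) -> list[str]:
    """Read one file out of a wheel (wheels are plain zip archives)."""
    with zipfile.ZipFile(wheel_path) as zf:
        return zf.read(member).decode("utf-8", errors="replace").splitlines(keepends=True)

for line in difflib.unified_diff(
    read_member(OLD_WHEEL, MEMBER),
    read_member(NEW_WHEEL, MEMBER),
    fromfile=f"a/{MEMBER}",
    tofile=f"b/{MEMBER}",
):
    print(line, end="")
```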
webscout/Provider/OPENAI/flowith.py (+179 -166). The source viewer did not render most of the removed implementation; unrecovered removed lines appear as `...`:

```diff
--- a/webscout/Provider/OPENAI/flowith.py
+++ b/webscout/Provider/OPENAI/flowith.py
@@ -1,166 +1,179 @@
 from typing import List, Dict, Optional, Union, Generator, Any
 import time
 import json
 
 # Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage
 )
 
 # Import requests for HTTP requests (instead of curl_cffi)
 import requests
 import zstandard as zstd
 import uuid
 
 # Attempt to import LitAgent, fallback if not available
 try:
     from webscout.litagent import LitAgent
 except ImportError:
     class LitAgent:
         def generate_fingerprint(self, browser):
             return {"user_agent": "Mozilla/5.0"}
 
 # --- Flowith OpenAI-Compatible Client ---
 
 class Completions(BaseCompletions):
     def __init__(self, client: 'Flowith'):
         self.client = client
 
     def create(
         self,
         *,
         model: str,
         messages: List[Dict[str, str]],
         max_tokens: Optional[int] = 2048,
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
         timeout: Optional[int] = None,
         proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
         Implements OpenAI-compatible chat/completions endpoint for Flowith.
         """
         url = "https://edge.flowith.net/ai/chat?mode=general"
         agent = LitAgent()
         fingerprint = agent.generate_fingerprint("chrome")
         headers = {
             "accept": "*/*",
             "accept-encoding": "gzip, deflate, br, zstd",
             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
             "content-type": "application/json",
             "origin": "https://flowith.io",
             "referer": "https://edge.flowith.net/",
             "user-agent": fingerprint["user_agent"],
             "dnt": "1",
             "sec-gpc": "1"
         }
         session = requests.Session()
         session.headers.update(headers)
         node_id = str(uuid.uuid4())
         request_id = f"chatcmpl-{uuid.uuid4()}"
         created_time = int(time.time())
         payload = {
             "model": model,
             "messages": messages,
             "stream": stream,
             "nodeId": node_id
         }
         payload.update(kwargs)
 
         def for_stream():
             try:
-                ...
-                )
-                ...
-                text =
-                ...
-                raise RuntimeError(f"Flowith request failed: {e}")
+                response = session.post(
+                    url,
+                    json=payload,
+                    stream=True,
+                    timeout=timeout or 30,
+                    proxies=proxies
+                )
+                response.raise_for_status()
+                for chunk in response.iter_content(chunk_size=4096):
+                    if not chunk:
+                        break
+                    text = chunk.decode('utf-8', errors='replace')
+                    delta = ChoiceDelta(content=text, role="assistant")
+                    choice = Choice(index=0, delta=delta)
+                    chunk_obj = ChatCompletionChunk(
+                        id=request_id,
+                        choices=[choice],
+                        created=created_time,
+                        model=model,
+                        system_fingerprint=None
+                    )
+                    yield chunk_obj
+            except Exception as e:
+                raise RuntimeError(f"Flowith streaming request failed: {e}")
+
+        def for_non_stream():
+            try:
+                response = session.post(
+                    url,
+                    json=payload,
+                    timeout=timeout or 30,
+                    proxies=proxies
+                )
+                response.raise_for_status()
+                encoding = response.headers.get('Content-Encoding', '').lower()
+
+                # Try to handle different compression formats
+                if encoding == 'zstd':
+                    try:
+                        # First, check if the content is actually zstd compressed
+                        if response.content.startswith(b'\x28\xb5\x2f\xfd'):  # zstd magic number
+                            dctx = zstd.ZstdDecompressor()
+                            text = dctx.decompress(response.content).decode('utf-8', errors='replace')
+                        else:
+                            text = response.content.decode('utf-8', errors='replace')
+                    except Exception as zstd_error:
+                        text = response.content.decode('utf-8', errors='replace')
+                elif encoding in ['gzip', 'deflate', 'br']:
+                    # Let requests handle other compression formats automatically
+                    text = response.text
+                else:
+                    text = response.text
+
+                # Flowith returns raw text, not JSON
+                content = text.strip()
+                message = ChatCompletionMessage(role="assistant", content=content)
+                choice = Choice(index=0, message=message, finish_reason="stop")
+                usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
+                completion = ChatCompletion(
+                    id=request_id,
+                    choices=[choice],
+                    created=created_time,
+                    model=model,
+                    usage=usage
+                )
+                return completion
+            except Exception as e:
+                raise RuntimeError(f"Flowith request failed: {e}")
 
         return for_stream() if stream else for_non_stream()
 
 class Chat(BaseChat):
     def __init__(self, client: 'Flowith'):
         self.completions = Completions(client)
 
 class Flowith(OpenAICompatibleProvider):
     AVAILABLE_MODELS = [
         "gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner", "claude-3.5-haiku",
         "gemini-2.0-flash", "gemini-2.5-flash", "grok-3-mini"
     ]
 
     chat: Chat
     def __init__(self):
         self.chat = Chat(self)
 
     @property
     def models(self):
         class _ModelList:
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
         return _ModelList()
+
+if __name__ == "__main__":
+    # Example usage
+    client = Flowith()
+    messages = [{"role": "user", "content": "Hello, how are you?"}]
+    response = client.chat.completions.create(
+        model="gpt-4.1-mini",
+        messages=messages,
+        stream=True
+    )
+    for chunk in response:
+        print(chunk.choices[0].delta.content, end="", flush=True)
+    print()
```
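
The new `for_non_stream` path guards zstd handling with a frame magic-number check, since a server may label a response `Content-Encoding: zstd` while actually sending plain text. A minimal, self-contained sketch of that pattern (sample payloads invented for illustration):

```python
# Sketch of the zstd-or-plain-text detection used in for_non_stream above.
# The zstd frame magic number is 0xFD2FB528 little-endian: b'\x28\xb5\x2f\xfd'.
import zstandard as zstd

ZSTD_MAGIC = b"\x28\xb5\x2f\xfd"

def decode_body(raw: bytes) -> str:
    """Decompress only if the body really starts with a zstd frame."""
    if raw.startswith(ZSTD_MAGIC):
        return zstd.ZstdDecompressor().decompress(raw).decode("utf-8", errors="replace")
    return raw.decode("utf-8", errors="replace")

# Illustrative round trip: compressed bodies are decompressed,
# mislabeled plain-text bodies pass through unchanged.
compressed = zstd.ZstdCompressor().compress(b"hello from flowith")
assert decode_body(compressed) == "hello from flowith"
assert decode_body(b"already plain text") == "already plain text"
```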
webscout/Provider/OPENAI/friendli.py (+233 -0, new file):

```diff
--- /dev/null
+++ b/webscout/Provider/OPENAI/friendli.py
@@ -0,0 +1,233 @@
+import requests
+import json
+import time
+import uuid
+from typing import List, Dict, Optional, Union, Generator, Any
+
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    LitAgent = None
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'Friendli'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 81920,
+        min_tokens: Optional[int] = 0,
+        stream: bool = False,
+        temperature: Optional[float] = 1,
+        top_p: Optional[float] = 0.8,
+        frequency_penalty: Optional[float] = 0,
+        stop: Optional[List[str]] = None,
+        stream_options: Optional[Dict[str, Any]] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        payload = {
+            "model": model,
+            "messages": messages,
+            "min_tokens": min_tokens,
+            "max_tokens": max_tokens,
+            "temperature": temperature,
+            "top_p": top_p,
+            "frequency_penalty": frequency_penalty,
+            "stop": stop or [],
+            "stream": stream,
+            "stream_options": stream_options or {"include_usage": True},
+        }
+        payload.update(kwargs)
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                headers=self._client.headers,
+                json=payload,
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies
+            )
+            response.raise_for_status()
+            for line in response.iter_lines():
+                if line:
+                    decoded_line = line.decode('utf-8').strip()
+                    if decoded_line.startswith("data: "):
+                        json_str = decoded_line[6:]
+                        if json_str == "[DONE]":
+                            break
+                        try:
+                            data = json.loads(json_str)
+                            choices = data.get('choices', [])
+                            if not choices:
+                                continue  # Skip if choices is empty
+                            choice_data = choices[0]
+                            delta_data = choice_data.get('delta', {})
+                            finish_reason = choice_data.get('finish_reason')
+                            delta = ChoiceDelta(
+                                content=delta_data.get('content'),
+                                role=delta_data.get('role'),
+                                tool_calls=delta_data.get('tool_calls')
+                            )
+                            choice = Choice(
+                                index=choice_data.get('index', 0),
+                                delta=delta,
+                                finish_reason=finish_reason,
+                                logprobs=choice_data.get('logprobs')
+                            )
+                            chunk = ChatCompletionChunk(
+                                id=data.get('id', request_id),
+                                choices=[choice],
+                                created=data.get('created', created_time),
+                                model=data.get('model', model),
+                                system_fingerprint=data.get('system_fingerprint'),
+                            )
+                            yield chunk
+                        except json.JSONDecodeError:
+                            continue
+        except requests.exceptions.RequestException as e:
+            print(f"Error during Friendli stream request: {e}")
+            raise IOError(f"Friendli request failed: {e}") from e
+        except Exception as e:
+            print(f"Error processing Friendli stream: {e}")
+            raise
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        try:
+            response = self._client.session.post(
+                self._client.base_url,
+                headers=self._client.headers,
+                json=payload,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies
+            )
+            response.raise_for_status()
+            data = response.json()
+            choices_data = data.get('choices', [])
+            usage_data = data.get('usage', {})
+            choices = []
+            for choice_d in choices_data:
+                message_d = choice_d.get('message', {})
+                message = ChatCompletionMessage(
+                    role=message_d.get('role', 'assistant'),
+                    content=message_d.get('content', '')
+                )
+                choice = Choice(
+                    index=choice_d.get('index', 0),
+                    message=message,
+                    finish_reason=choice_d.get('finish_reason', 'stop')
+                )
+                choices.append(choice)
+            usage = CompletionUsage(
+                prompt_tokens=usage_data.get('prompt_tokens', 0),
+                completion_tokens=usage_data.get('completion_tokens', 0),
+                total_tokens=usage_data.get('total_tokens', 0)
+            )
+            completion = ChatCompletion(
+                id=data.get('id', request_id),
+                choices=choices,
+                created=data.get('created', created_time),
+                model=data.get('model', model),
+                usage=usage,
+            )
+            return completion
+        except requests.exceptions.RequestException as e:
+            print(f"Error during Friendli non-stream request: {e}")
+            raise IOError(f"Friendli request failed: {e}") from e
+        except Exception as e:
+            print(f"Error processing Friendli response: {e}")
+            raise
+
+class Chat(BaseChat):
+    def __init__(self, client: 'Friendli'):
+        self.completions = Completions(client)
+
+class Friendli(OpenAICompatibleProvider):
+    AVAILABLE_MODELS = [
+        "deepseek-r1",
+        # Add more as needed
+    ]
+    def __init__(self, browser: str = "chrome"):
+        self.timeout = None
+        self.base_url = "https://friendli.ai/serverless/v1/chat/completions"
+        self.session = requests.Session()
+        agent = LitAgent()
+        fingerprint = agent.generate_fingerprint(browser)
+        self.headers = {
+            "Accept": fingerprint["accept"],
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "Origin": "https://friendli.ai",
+            "Referer": "https://friendli.ai/",
+            "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="137", "Chromium";v="137"',
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
+            "User-Agent": fingerprint["user_agent"],
+            # Improved formatting for cookie header
+            "cookie": (
+                f"Next-Locale=en; "
+                f"cookie-consent-state=rejected; "
+                f"_gcl_au=1.1.2030343227.1749659739; "
+                f"st-last-access-token-update=1749659740085; "
+                f"_ga=GA1.1.912258413.1749659740; "
+                f"AMP_MKTG_26fe53b9aa=JTdCJTdE; "
+                f"pfTmpSessionVisitorContext=eb4334fe9f7540c7828d3ba71bab1fa7; "
+                f"_fuid=MGVkY2IzZTItNDExNC00OTMxLWIyYjMtMDlhM2QyZDkwMTlj; "
+                f"g_state={{\"i_p\":1749666944837,\"i_l\":1}}; "
+                f"__stripe_mid={str(uuid.uuid4())}; "
+                f"__stripe_sid={str(uuid.uuid4())}; "
+                f"intercom-id-hcnpxbkh={str(uuid.uuid4())}; "
+                f"intercom-session-hcnpxbkh=; "
+                f"intercom-device-id-hcnpxbkh={str(uuid.uuid4())}; "
+                f"AMP_26fe53b9aa=JTdCJTIyZGV2aWNlSWQlMjIlM0ElMjJjOTJkMDYxYy0yYzBkLTQ4YTYtOGYzMy1kMjIzZTNjMzA1MzMlMjIlMkMlMjJzZXNzaW9uSWQlMjIlM0ExNzQ5NjU5NzQxMDkxJTJDJTIyb3B0T3V0JTIyJTNBZmFsc2UlMkMlMjJsYXN0RXZlbnRUaW1lJTIyJTNBMTc0OTY1OTc1NzQ5NiUyQyUyMmxhc3RFdmVudElkJTIyJTNBNCUyQyUyMnBhZ2VDb3VudGVyJTIyJTNBMiU3RA==; "
+                f"_ga_PS0FM9F67K=GS2.1.s1749659740$o1$g1$t1749659771$j29$l0$h644129183"
+            ),  # Replace with actual cookie
+            "rid": "anti-csrf",  # Replace with actual rid token if dynamic, otherwise keep as is
+            "Sec-Fetch-Dest": "empty",  # Keep existing headers
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin"
+        }
+        self.session.headers.update(self.headers)
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+if __name__ == "__main__":
+    client = Friendli()
+    resp = client.chat.completions.create(
+        model="deepseek-r1",
+        messages=[{"role": "user", "content": "Hello, how are you?"}],
+        stream=True
+    )
+    for chunk in resp:
+        print(chunk.choices[0].delta.content, end='', flush=True)  # Print each chunk as it arrives
```
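
The Friendli streaming path above parses server-sent-events style `data:` lines. A minimal, standalone sketch of that parsing loop, run against canned lines instead of a live endpoint so it executes as-is:

```python
# Sketch of the "data: ..." / [DONE] parsing pattern used in _create_stream.
import json

def iter_sse_payloads(lines):
    """Yield parsed JSON payloads from 'data: ...' lines, stopping at [DONE]."""
    for raw in lines:
        line = raw.strip()
        if not line.startswith("data: "):
            continue
        body = line[len("data: "):]
        if body == "[DONE]":
            break
        try:
            yield json.loads(body)
        except json.JSONDecodeError:
            continue  # tolerate keep-alives / partial frames

# Canned sample standing in for response.iter_lines() output.
sample = [
    'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"choices": [{"delta": {"content": "lo"}}]}',
    "data: [DONE]",
]
for payload in iter_sse_payloads(sample):
    print(payload["choices"][0]["delta"].get("content", ""), end="")
print()
```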