webscout 8.3.2__py3-none-any.whl → 8.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (94)
  1. webscout/AIutel.py +146 -37
  2. webscout/Bing_search.py +1 -2
  3. webscout/Provider/AISEARCH/__init__.py +1 -0
  4. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  5. webscout/Provider/ExaChat.py +84 -58
  6. webscout/Provider/HeckAI.py +85 -80
  7. webscout/Provider/Jadve.py +56 -50
  8. webscout/Provider/MiniMax.py +207 -0
  9. webscout/Provider/Nemotron.py +41 -13
  10. webscout/Provider/Netwrck.py +34 -51
  11. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  12. webscout/Provider/OPENAI/MiniMax.py +298 -0
  13. webscout/Provider/OPENAI/README.md +30 -29
  14. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  15. webscout/Provider/OPENAI/__init__.py +3 -1
  16. webscout/Provider/OPENAI/autoproxy.py +752 -17
  17. webscout/Provider/OPENAI/base.py +7 -76
  18. webscout/Provider/OPENAI/deepinfra.py +42 -108
  19. webscout/Provider/OPENAI/flowith.py +179 -166
  20. webscout/Provider/OPENAI/friendli.py +233 -0
  21. webscout/Provider/OPENAI/monochat.py +329 -0
  22. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  23. webscout/Provider/OPENAI/toolbaz.py +1 -0
  24. webscout/Provider/OPENAI/typegpt.py +1 -1
  25. webscout/Provider/OPENAI/utils.py +19 -42
  26. webscout/Provider/OPENAI/x0gpt.py +14 -2
  27. webscout/Provider/OpenGPT.py +54 -32
  28. webscout/Provider/PI.py +58 -84
  29. webscout/Provider/StandardInput.py +32 -13
  30. webscout/Provider/TTI/README.md +9 -9
  31. webscout/Provider/TTI/__init__.py +2 -1
  32. webscout/Provider/TTI/aiarta.py +92 -78
  33. webscout/Provider/TTI/infip.py +212 -0
  34. webscout/Provider/TTI/monochat.py +220 -0
  35. webscout/Provider/TeachAnything.py +11 -3
  36. webscout/Provider/TextPollinationsAI.py +78 -70
  37. webscout/Provider/TogetherAI.py +32 -48
  38. webscout/Provider/Venice.py +37 -46
  39. webscout/Provider/VercelAI.py +27 -24
  40. webscout/Provider/WiseCat.py +35 -35
  41. webscout/Provider/WrDoChat.py +22 -26
  42. webscout/Provider/WritingMate.py +26 -22
  43. webscout/Provider/__init__.py +2 -2
  44. webscout/Provider/granite.py +48 -57
  45. webscout/Provider/koala.py +51 -39
  46. webscout/Provider/learnfastai.py +49 -64
  47. webscout/Provider/llmchat.py +79 -93
  48. webscout/Provider/llmchatco.py +63 -78
  49. webscout/Provider/multichat.py +51 -40
  50. webscout/Provider/oivscode.py +1 -1
  51. webscout/Provider/scira_chat.py +159 -96
  52. webscout/Provider/scnet.py +13 -13
  53. webscout/Provider/searchchat.py +13 -13
  54. webscout/Provider/sonus.py +12 -11
  55. webscout/Provider/toolbaz.py +25 -8
  56. webscout/Provider/turboseek.py +41 -42
  57. webscout/Provider/typefully.py +27 -12
  58. webscout/Provider/typegpt.py +41 -46
  59. webscout/Provider/uncovr.py +55 -90
  60. webscout/Provider/x0gpt.py +33 -17
  61. webscout/Provider/yep.py +79 -96
  62. webscout/auth/__init__.py +12 -1
  63. webscout/auth/providers.py +27 -5
  64. webscout/auth/routes.py +128 -104
  65. webscout/auth/server.py +367 -312
  66. webscout/client.py +121 -116
  67. webscout/litagent/Readme.md +68 -55
  68. webscout/litagent/agent.py +99 -9
  69. webscout/version.py +1 -1
  70. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/METADATA +102 -90
  71. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/RECORD +75 -87
  72. webscout/Provider/TTI/fastflux.py +0 -233
  73. webscout/Provider/Writecream.py +0 -246
  74. webscout/auth/static/favicon.svg +0 -11
  75. webscout/auth/swagger_ui.py +0 -203
  76. webscout/auth/templates/components/authentication.html +0 -237
  77. webscout/auth/templates/components/base.html +0 -103
  78. webscout/auth/templates/components/endpoints.html +0 -750
  79. webscout/auth/templates/components/examples.html +0 -491
  80. webscout/auth/templates/components/footer.html +0 -75
  81. webscout/auth/templates/components/header.html +0 -27
  82. webscout/auth/templates/components/models.html +0 -286
  83. webscout/auth/templates/components/navigation.html +0 -70
  84. webscout/auth/templates/static/api.js +0 -455
  85. webscout/auth/templates/static/icons.js +0 -168
  86. webscout/auth/templates/static/main.js +0 -784
  87. webscout/auth/templates/static/particles.js +0 -201
  88. webscout/auth/templates/static/styles.css +0 -3353
  89. webscout/auth/templates/static/ui.js +0 -374
  90. webscout/auth/templates/swagger_ui.html +0 -170
  91. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  92. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +0 -0
  93. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  94. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/flowith.py
@@ -1,166 +1,179 @@
- from typing import List, Dict, Optional, Union, Generator, Any
- import time
- import json
-
- # Import base classes and utility structures
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from .utils import (
-     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-     ChatCompletionMessage, CompletionUsage
- )
-
- # Import requests for HTTP requests (instead of curl_cffi)
- import requests
- import zstandard as zstd
- import uuid
-
- # Attempt to import LitAgent, fallback if not available
- try:
-     from webscout.litagent import LitAgent
- except ImportError:
-     class LitAgent:
-         def generate_fingerprint(self, browser):
-             return {"user_agent": "Mozilla/5.0"}
-
- # --- Flowith OpenAI-Compatible Client ---
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'Flowith'):
-         self.client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = 2048,
-         stream: bool = False,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         timeout: Optional[int] = None,
-         proxies: Optional[Dict[str, str]] = None,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Implements OpenAI-compatible chat/completions endpoint for Flowith.
-         """
-         url = "https://edge.flowith.net/ai/chat?mode=general"
-         agent = LitAgent()
-         fingerprint = agent.generate_fingerprint("chrome")
-         headers = {
-             "accept": "*/*",
-             "accept-encoding": "gzip, deflate, br, zstd",
-             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-             "content-type": "application/json",
-             "origin": "https://flowith.io",
-             "referer": "https://edge.flowith.net/",
-             "user-agent": fingerprint["user_agent"],
-             "dnt": "1",
-             "sec-gpc": "1"
-         }
-         session = requests.Session()
-         session.headers.update(headers)
-         node_id = str(uuid.uuid4())
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-         payload = {
-             "model": model,
-             "messages": messages,
-             "stream": stream,
-             "nodeId": node_id
-         }
-         payload.update(kwargs)
-
-         def for_stream():
-             try:
-                 print(f"[DEBUG] Sending streaming request to {url} with payload: {payload}")
-                 response = session.post(
-                     url,
-                     json=payload,
-                     stream=True,
-                     timeout=timeout or 30,
-                     proxies=proxies
-                 )
-                 print(f"[DEBUG] Response status: {response.status_code}")
-                 response.raise_for_status()
-                 for chunk in response.iter_content(chunk_size=4096):
-                     if not chunk:
-                         break
-                     text = chunk.decode('utf-8', errors='replace')
-                     print(f"[DEBUG] Stream chunk: {repr(text)}")
-                     delta = ChoiceDelta(content=text, role="assistant")
-                     choice = Choice(index=0, delta=delta)
-                     chunk_obj = ChatCompletionChunk(
-                         id=request_id,
-                         choices=[choice],
-                         created=created_time,
-                         model=model,
-                         system_fingerprint=None
-                     )
-                     yield chunk_obj
-             except Exception as e:
-                 print(f"[DEBUG] Streaming error: {e}")
-                 raise RuntimeError(f"Flowith streaming request failed: {e}")
-
-         def for_non_stream():
-             try:
-                 print(f"[DEBUG] Sending non-stream request to {url} with payload: {payload}")
-                 response = session.post(
-                     url,
-                     json=payload,
-                     timeout=timeout or 30,
-                     proxies=proxies
-                 )
-                 print(f"[DEBUG] Response status: {response.status_code}")
-                 response.raise_for_status()
-                 encoding = response.headers.get('Content-Encoding', '').lower()
-                 print(f"[DEBUG] Response encoding: {encoding}")
-                 if encoding == 'zstd':
-                     dctx = zstd.ZstdDecompressor()
-                     with dctx.stream_reader(response.raw) as reader:
-                         decompressed = reader.read()
-                     text = decompressed.decode('utf-8', errors='replace')
-                 else:
-                     text = response.text
-                 print(f"[DEBUG] Raw response text: {repr(text)}")
-                 # Flowith returns raw text, not JSON
-                 content = text.strip()
-                 print(f"[DEBUG] Final content for ChatCompletion: {repr(content)}")
-                 message = ChatCompletionMessage(role="assistant", content=content)
-                 choice = Choice(index=0, message=message, finish_reason="stop")
-                 usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
-                 completion = ChatCompletion(
-                     id=request_id,
-                     choices=[choice],
-                     created=created_time,
-                     model=model,
-                     usage=usage
-                 )
-                 print(f"[DEBUG] Returning ChatCompletion: {completion}")
-                 return completion
-             except Exception as e:
-                 print(f"[DEBUG] Non-streaming error: {e}")
-                 raise RuntimeError(f"Flowith request failed: {e}")
-
-         return for_stream() if stream else for_non_stream()
-
- class Chat(BaseChat):
-     def __init__(self, client: 'Flowith'):
-         self.completions = Completions(client)
-
- class Flowith(OpenAICompatibleProvider):
-     AVAILABLE_MODELS = [
-         "gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner", "claude-3.5-haiku",
-         "gemini-2.0-flash", "gemini-2.5-flash", "grok-3-mini"
-     ]
-
-     chat: Chat
-     def __init__(self):
-         self.chat = Chat(self)
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return type(self).AVAILABLE_MODELS
-         return _ModelList()
+ from typing import List, Dict, Optional, Union, Generator, Any
+ import time
+ import json
+
+ # Import base classes and utility structures
+ from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from webscout.Provider.OPENAI.utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage
+ )
+
+ # Import requests for HTTP requests (instead of curl_cffi)
+ import requests
+ import zstandard as zstd
+ import uuid
+
+ # Attempt to import LitAgent, fallback if not available
+ try:
+     from webscout.litagent import LitAgent
+ except ImportError:
+     class LitAgent:
+         def generate_fingerprint(self, browser):
+             return {"user_agent": "Mozilla/5.0"}
+
+ # --- Flowith OpenAI-Compatible Client ---
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'Flowith'):
+         self.client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = 2048,
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         timeout: Optional[int] = None,
+         proxies: Optional[Dict[str, str]] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Implements OpenAI-compatible chat/completions endpoint for Flowith.
+         """
+         url = "https://edge.flowith.net/ai/chat?mode=general"
+         agent = LitAgent()
+         fingerprint = agent.generate_fingerprint("chrome")
+         headers = {
+             "accept": "*/*",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "content-type": "application/json",
+             "origin": "https://flowith.io",
+             "referer": "https://edge.flowith.net/",
+             "user-agent": fingerprint["user_agent"],
+             "dnt": "1",
+             "sec-gpc": "1"
+         }
+         session = requests.Session()
+         session.headers.update(headers)
+         node_id = str(uuid.uuid4())
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+         payload = {
+             "model": model,
+             "messages": messages,
+             "stream": stream,
+             "nodeId": node_id
+         }
+         payload.update(kwargs)
+
+         def for_stream():
+             try:
+                 response = session.post(
+                     url,
+                     json=payload,
+                     stream=True,
+                     timeout=timeout or 30,
+                     proxies=proxies
+                 )
+                 response.raise_for_status()
+                 for chunk in response.iter_content(chunk_size=4096):
+                     if not chunk:
+                         break
+                     text = chunk.decode('utf-8', errors='replace')
+                     delta = ChoiceDelta(content=text, role="assistant")
+                     choice = Choice(index=0, delta=delta)
+                     chunk_obj = ChatCompletionChunk(
+                         id=request_id,
+                         choices=[choice],
+                         created=created_time,
+                         model=model,
+                         system_fingerprint=None
+                     )
+                     yield chunk_obj
+             except Exception as e:
+                 raise RuntimeError(f"Flowith streaming request failed: {e}")
+
+         def for_non_stream():
+             try:
+                 response = session.post(
+                     url,
+                     json=payload,
+                     timeout=timeout or 30,
+                     proxies=proxies
+                 )
+                 response.raise_for_status()
+                 encoding = response.headers.get('Content-Encoding', '').lower()
+
+                 # Try to handle different compression formats
+                 if encoding == 'zstd':
+                     try:
+                         # First, check if the content is actually zstd compressed
+                         if response.content.startswith(b'\x28\xb5\x2f\xfd'):  # zstd magic number
+                             dctx = zstd.ZstdDecompressor()
+                             text = dctx.decompress(response.content).decode('utf-8', errors='replace')
+                         else:
+                             text = response.content.decode('utf-8', errors='replace')
+                     except Exception as zstd_error:
+                         text = response.content.decode('utf-8', errors='replace')
+                 elif encoding in ['gzip', 'deflate', 'br']:
+                     # Let requests handle other compression formats automatically
+                     text = response.text
+                 else:
+                     text = response.text
+
+                 # Flowith returns raw text, not JSON
+                 content = text.strip()
+                 message = ChatCompletionMessage(role="assistant", content=content)
+                 choice = Choice(index=0, message=message, finish_reason="stop")
+                 usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
+                 completion = ChatCompletion(
+                     id=request_id,
+                     choices=[choice],
+                     created=created_time,
+                     model=model,
+                     usage=usage
+                 )
+                 return completion
+             except Exception as e:
+                 raise RuntimeError(f"Flowith request failed: {e}")
+
+         return for_stream() if stream else for_non_stream()
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'Flowith'):
+         self.completions = Completions(client)
+
+ class Flowith(OpenAICompatibleProvider):
+     AVAILABLE_MODELS = [
+         "gpt-4.1-mini", "deepseek-chat", "deepseek-reasoner", "claude-3.5-haiku",
+         "gemini-2.0-flash", "gemini-2.5-flash", "grok-3-mini"
+     ]
+
+     chat: Chat
+     def __init__(self):
+         self.chat = Chat(self)
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return type(self).AVAILABLE_MODELS
+         return _ModelList()
+
+ if __name__ == "__main__":
+     # Example usage
+     client = Flowith()
+     messages = [{"role": "user", "content": "Hello, how are you?"}]
+     response = client.chat.completions.create(
+         model="gpt-4.1-mini",
+         messages=messages,
+         stream=True
+     )
+     for chunk in response:
+         print(chunk.choices[0].delta.content, end="", flush=True)
+     print()
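
Beyond dropping the [DEBUG] prints, the main behavioral change in flowith.py is the non-stream decompression path: instead of piping response.raw through ZstdDecompressor.stream_reader(), the new code checks for the four-byte zstd frame magic number before decompressing and falls back to plain UTF-8 decoding when the body is not actually zstd-compressed or decompression fails. A minimal standalone sketch of that sniff-then-fallback pattern (decode_body and the 10 MiB output cap are illustrative here, not part of webscout):

    import zstandard as zstd

    ZSTD_MAGIC = b"\x28\xb5\x2f\xfd"  # every zstd frame starts with little-endian 0xFD2FB528

    def decode_body(raw: bytes, content_encoding: str) -> str:
        """Decode an HTTP body whose Content-Encoding header may wrongly claim zstd."""
        if content_encoding.lower() == "zstd" and raw.startswith(ZSTD_MAGIC):
            try:
                # max_output_size is required when the frame header omits the content size
                data = zstd.ZstdDecompressor().decompress(raw, max_output_size=10 * 1024 * 1024)
                return data.decode("utf-8", errors="replace")
            except zstd.ZstdError:
                pass  # truncated or corrupt frame: fall through to plain decoding
        return raw.decode("utf-8", errors="replace")

Sniffing the magic number avoids the failure mode the old code risked when a server advertised zstd but sent an uncompressed body.
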
webscout/Provider/OPENAI/friendli.py
@@ -0,0 +1,233 @@
+ import requests
+ import json
+ import time
+ import uuid
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from webscout.Provider.OPENAI.utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage
+ )
+
+ try:
+     from webscout.litagent import LitAgent
+ except ImportError:
+     LitAgent = None
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'Friendli'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = 81920,
+         min_tokens: Optional[int] = 0,
+         stream: bool = False,
+         temperature: Optional[float] = 1,
+         top_p: Optional[float] = 0.8,
+         frequency_penalty: Optional[float] = 0,
+         stop: Optional[List[str]] = None,
+         stream_options: Optional[Dict[str, Any]] = None,
+         timeout: Optional[int] = None,
+         proxies: Optional[Dict[str, str]] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         payload = {
+             "model": model,
+             "messages": messages,
+             "min_tokens": min_tokens,
+             "max_tokens": max_tokens,
+             "temperature": temperature,
+             "top_p": top_p,
+             "frequency_penalty": frequency_penalty,
+             "stop": stop or [],
+             "stream": stream,
+             "stream_options": stream_options or {"include_usage": True},
+         }
+         payload.update(kwargs)
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+         if stream:
+             return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+         else:
+             return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+         timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             response = self._client.session.post(
+                 self._client.base_url,
+                 headers=self._client.headers,
+                 json=payload,
+                 stream=True,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies
+             )
+             response.raise_for_status()
+             for line in response.iter_lines():
+                 if line:
+                     decoded_line = line.decode('utf-8').strip()
+                     if decoded_line.startswith("data: "):
+                         json_str = decoded_line[6:]
+                         if json_str == "[DONE]":
+                             break
+                         try:
+                             data = json.loads(json_str)
+                             choices = data.get('choices', [])
+                             if not choices:
+                                 continue  # Skip if choices is empty
+                             choice_data = choices[0]
+                             delta_data = choice_data.get('delta', {})
+                             finish_reason = choice_data.get('finish_reason')
+                             delta = ChoiceDelta(
+                                 content=delta_data.get('content'),
+                                 role=delta_data.get('role'),
+                                 tool_calls=delta_data.get('tool_calls')
+                             )
+                             choice = Choice(
+                                 index=choice_data.get('index', 0),
+                                 delta=delta,
+                                 finish_reason=finish_reason,
+                                 logprobs=choice_data.get('logprobs')
+                             )
+                             chunk = ChatCompletionChunk(
+                                 id=data.get('id', request_id),
+                                 choices=[choice],
+                                 created=data.get('created', created_time),
+                                 model=data.get('model', model),
+                                 system_fingerprint=data.get('system_fingerprint'),
+                             )
+                             yield chunk
+                         except json.JSONDecodeError:
+                             continue
+         except requests.exceptions.RequestException as e:
+             print(f"Error during Friendli stream request: {e}")
+             raise IOError(f"Friendli request failed: {e}") from e
+         except Exception as e:
+             print(f"Error processing Friendli stream: {e}")
+             raise
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+         timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+     ) -> ChatCompletion:
+         try:
+             response = self._client.session.post(
+                 self._client.base_url,
+                 headers=self._client.headers,
+                 json=payload,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies
+             )
+             response.raise_for_status()
+             data = response.json()
+             choices_data = data.get('choices', [])
+             usage_data = data.get('usage', {})
+             choices = []
+             for choice_d in choices_data:
+                 message_d = choice_d.get('message', {})
+                 message = ChatCompletionMessage(
+                     role=message_d.get('role', 'assistant'),
+                     content=message_d.get('content', '')
+                 )
+                 choice = Choice(
+                     index=choice_d.get('index', 0),
+                     message=message,
+                     finish_reason=choice_d.get('finish_reason', 'stop')
+                 )
+                 choices.append(choice)
+             usage = CompletionUsage(
+                 prompt_tokens=usage_data.get('prompt_tokens', 0),
+                 completion_tokens=usage_data.get('completion_tokens', 0),
+                 total_tokens=usage_data.get('total_tokens', 0)
+             )
+             completion = ChatCompletion(
+                 id=data.get('id', request_id),
+                 choices=choices,
+                 created=data.get('created', created_time),
+                 model=data.get('model', model),
+                 usage=usage,
+             )
+             return completion
+         except requests.exceptions.RequestException as e:
+             print(f"Error during Friendli non-stream request: {e}")
+             raise IOError(f"Friendli request failed: {e}") from e
+         except Exception as e:
+             print(f"Error processing Friendli response: {e}")
+             raise
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'Friendli'):
+         self.completions = Completions(client)
+
+ class Friendli(OpenAICompatibleProvider):
+     AVAILABLE_MODELS = [
+         "deepseek-r1",
+         # Add more as needed
+     ]
+     def __init__(self, browser: str = "chrome"):
+         self.timeout = None
+         self.base_url = "https://friendli.ai/serverless/v1/chat/completions"
+         self.session = requests.Session()
+         agent = LitAgent()
+         fingerprint = agent.generate_fingerprint(browser)
+         self.headers = {
+             "Accept": fingerprint["accept"],
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": fingerprint["accept_language"],
+             "Content-Type": "application/json",
+             "Origin": "https://friendli.ai",
+             "Referer": "https://friendli.ai/",
+             "Sec-CH-UA": fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="137", "Chromium";v="137"',
+             "Sec-CH-UA-Mobile": "?0",
+             "Sec-CH-UA-Platform": f'"{fingerprint["platform"]}"',
+             "User-Agent": fingerprint["user_agent"],
+             # Improved formatting for cookie header
+             "cookie": (
+                 f"Next-Locale=en; "
+                 f"cookie-consent-state=rejected; "
+                 f"_gcl_au=1.1.2030343227.1749659739; "
+                 f"st-last-access-token-update=1749659740085; "
+                 f"_ga=GA1.1.912258413.1749659740; "
+                 f"AMP_MKTG_26fe53b9aa=JTdCJTdE; "
+                 f"pfTmpSessionVisitorContext=eb4334fe9f7540c7828d3ba71bab1fa7; "
+                 f"_fuid=MGVkY2IzZTItNDExNC00OTMxLWIyYjMtMDlhM2QyZDkwMTlj; "
+                 f"g_state={{\"i_p\":1749666944837,\"i_l\":1}}; "
+                 f"__stripe_mid={str(uuid.uuid4())}; "
+                 f"__stripe_sid={str(uuid.uuid4())}; "
+                 f"intercom-id-hcnpxbkh={str(uuid.uuid4())}; "
+                 f"intercom-session-hcnpxbkh=; "
+                 f"intercom-device-id-hcnpxbkh={str(uuid.uuid4())}; "
+                 f"AMP_26fe53b9aa=JTdCJTIyZGV2aWNlSWQlMjIlM0ElMjJjOTJkMDYxYy0yYzBkLTQ4YTYtOGYzMy1kMjIzZTNjMzA1MzMlMjIlMkMlMjJzZXNzaW9uSWQlMjIlM0ExNzQ5NjU5NzQxMDkxJTJDJTIyb3B0T3V0JTIyJTNBZmFsc2UlMkMlMjJsYXN0RXZlbnRUaW1lJTIyJTNBMTc0OTY1OTc1NzQ5NiUyQyUyMmxhc3RFdmVudElkJTIyJTNBNCUyQyUyMnBhZ2VDb3VudGVyJTIyJTNBMiU3RA==; "
+                 f"_ga_PS0FM9F67K=GS2.1.s1749659740$o1$g1$t1749659771$j29$l0$h644129183"
+             ),  # Replace with actual cookie
+             "rid": "anti-csrf",  # Replace with actual rid token if dynamic, otherwise keep as is
+             "Sec-Fetch-Dest": "empty",  # Keep existing headers
+             "Sec-Fetch-Mode": "cors",
+             "Sec-Fetch-Site": "same-origin"
+         }
+         self.session.headers.update(self.headers)
+         self.chat = Chat(self)
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return type(self).AVAILABLE_MODELS
+         return _ModelList()
+
+ if __name__ == "__main__":
+     client = Friendli()
+     resp = client.chat.completions.create(
+         model="deepseek-r1",
+         messages=[{"role": "user", "content": "Hello, how are you?"}],
+         stream=True
+     )
+     for chunk in resp:
+         print(chunk.choices[0].delta.content, end='', flush=True)  # Print each chunk as it arrives
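
The _create_stream method in friendli.py follows the usual OpenAI-style server-sent-events contract: each event arrives as a "data: ..." line, "[DONE]" marks the end of the stream, and empty or malformed events are skipped. A condensed sketch of just that parsing loop (parse_sse is a name invented for illustration; the requests usage mirrors the code above but is not webscout API):

    import json
    import requests

    def parse_sse(response: requests.Response):
        """Yield decoded JSON payloads from an OpenAI-style SSE response."""
        for line in response.iter_lines():
            if not line:
                continue  # blank lines separate SSE events
            decoded = line.decode("utf-8").strip()
            if not decoded.startswith("data: "):
                continue  # ignore comments and other SSE fields
            payload = decoded[len("data: "):]
            if payload == "[DONE]":  # OpenAI-style end-of-stream sentinel
                break
            try:
                yield json.loads(payload)
            except json.JSONDecodeError:
                continue  # skip partial or malformed events

Each dict yielded this way maps onto a ChatCompletionChunk exactly as the loop in _create_stream does.
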