webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +34 -16
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +491 -87
- webscout/Bard.py +441 -323
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +7 -59
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AISEARCH/Perplexity.py +332 -358
- webscout/Provider/AISEARCH/felo_search.py +9 -35
- webscout/Provider/AISEARCH/genspark_search.py +30 -56
- webscout/Provider/AISEARCH/hika_search.py +4 -16
- webscout/Provider/AISEARCH/iask_search.py +410 -436
- webscout/Provider/AISEARCH/monica_search.py +4 -30
- webscout/Provider/AISEARCH/scira_search.py +6 -32
- webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
- webscout/Provider/Blackboxai.py +155 -35
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +339 -339
- webscout/Provider/ExaChat.py +358 -358
- webscout/Provider/Gemini.py +169 -169
- webscout/Provider/GithubChat.py +1 -2
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/HeckAI.py +172 -82
- webscout/Provider/LambdaChat.py +1 -0
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
- webscout/Provider/OPENAI/Cloudflare.py +38 -21
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +35 -18
- webscout/Provider/OPENAI/NEMOTRON.py +34 -34
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +304 -0
- webscout/Provider/OPENAI/README.md +952 -1253
- webscout/Provider/OPENAI/TwoAI.py +374 -0
- webscout/Provider/OPENAI/__init__.py +7 -1
- webscout/Provider/OPENAI/ai4chat.py +73 -63
- webscout/Provider/OPENAI/api.py +869 -644
- webscout/Provider/OPENAI/base.py +2 -0
- webscout/Provider/OPENAI/c4ai.py +34 -13
- webscout/Provider/OPENAI/chatgpt.py +575 -556
- webscout/Provider/OPENAI/chatgptclone.py +512 -487
- webscout/Provider/OPENAI/chatsandbox.py +11 -6
- webscout/Provider/OPENAI/copilot.py +258 -0
- webscout/Provider/OPENAI/deepinfra.py +327 -318
- webscout/Provider/OPENAI/e2b.py +140 -104
- webscout/Provider/OPENAI/exaai.py +420 -411
- webscout/Provider/OPENAI/exachat.py +448 -443
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +12 -8
- webscout/Provider/OPENAI/glider.py +15 -8
- webscout/Provider/OPENAI/groq.py +5 -2
- webscout/Provider/OPENAI/heckai.py +311 -307
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +18 -9
- webscout/Provider/OPENAI/multichat.py +7 -5
- webscout/Provider/OPENAI/netwrck.py +16 -11
- webscout/Provider/OPENAI/oivscode.py +290 -0
- webscout/Provider/OPENAI/opkfc.py +507 -496
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +29 -17
- webscout/Provider/OPENAI/sonus.py +308 -303
- webscout/Provider/OPENAI/standardinput.py +442 -433
- webscout/Provider/OPENAI/textpollinations.py +18 -11
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +17 -10
- webscout/Provider/OPENAI/typegpt.py +21 -11
- webscout/Provider/OPENAI/uncovrAI.py +477 -462
- webscout/Provider/OPENAI/utils.py +90 -79
- webscout/Provider/OPENAI/venice.py +435 -425
- webscout/Provider/OPENAI/wisecat.py +387 -381
- webscout/Provider/OPENAI/writecream.py +166 -163
- webscout/Provider/OPENAI/x0gpt.py +26 -37
- webscout/Provider/OPENAI/yep.py +384 -356
- webscout/Provider/PI.py +2 -1
- webscout/Provider/TTI/README.md +55 -101
- webscout/Provider/TTI/__init__.py +4 -9
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/base.py +159 -159
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TextPollinationsAI.py +308 -308
- webscout/Provider/TwoAI.py +239 -44
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Writecream.py +246 -246
- webscout/Provider/__init__.py +2 -2
- webscout/Provider/ai4chat.py +33 -8
- webscout/Provider/granite.py +41 -6
- webscout/Provider/koala.py +169 -169
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +3 -2
- webscout/Provider/scnet.py +1 -0
- webscout/Provider/typegpt.py +3 -3
- webscout/Provider/uncovr.py +368 -368
- webscout/client.py +70 -0
- webscout/litprinter/__init__.py +58 -58
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +3 -1
- webscout/scout/core/crawler.py +134 -64
- webscout/scout/core/scout.py +148 -109
- webscout/scout/element.py +106 -88
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/plugins/manager.py +9 -2
- webscout/version.py +1 -1
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
- webscout-8.3.dist-info/RECORD +290 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
- webscout/Litlogger/Readme.md +0 -175
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/TTI/AiForce/README.md +0 -159
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/README.md +0 -174
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/README.md +0 -101
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/README.md +0 -155
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/README.md +0 -146
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/aiarta/README.md +0 -134
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/README.md +0 -100
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/README.md +0 -129
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/README.md +0 -114
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/README.md +0 -161
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/README.md +0 -79
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/README.md +0 -139
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/UNFINISHED/oivscode.py +0 -351
- webscout-8.2.8.dist-info/RECORD +0 -334
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
--- a/webscout/Provider/OPENAI/writecream.py
+++ b/webscout/Provider/OPENAI/writecream.py
@@ -1,163 +1,166 @@
-import time
-import uuid
-import requests
-import json
-from typing import List, Dict, Optional, Union, Generator, Any
-
-# Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
-    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage
-)
-
-# Attempt to import LitAgent, fallback if not available
-try:
-    from webscout.litagent import LitAgent
-except ImportError:
-    print("Warning: LitAgent not found. Using default user agent.")
-
-class Completions(BaseCompletions):
-    def __init__(self, client: 'Writecream'):
-        self._client = client
-
-    def create(
-        …
-        …
-        model: str = None, # Not used by Writecream, for compatibility
-        messages: List[Dict[str, str]],
-        max_tokens: Optional[int] = None, # Not used by Writecream
-        stream: bool = False,
-        temperature: Optional[float] = None, # Not used by Writecream
-        top_p: Optional[float] = None, # Not used by Writecream
-… [old lines 33-163 were not captured by the extraction; only the truncated fragments "return self." (line 45) and "self." (line 137) survive]
+import time
+import uuid
+import requests
+import json
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import base classes and utility structures
+from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from .utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage, count_tokens
+)
+
+# Attempt to import LitAgent, fallback if not available
+try:
+    from webscout.litagent import LitAgent
+except ImportError:
+    print("Warning: LitAgent not found. Using default user agent.")
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'Writecream'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str = None, # Not used by Writecream, for compatibility
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None, # Not used by Writecream
+        stream: bool = False,
+        temperature: Optional[float] = None, # Not used by Writecream
+        top_p: Optional[float] = None, # Not used by Writecream
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        payload = messages
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+        if stream:
+            return self._create_stream(request_id, created_time, payload, timeout, proxies)
+        else:
+            return self._create_non_stream(request_id, created_time, payload, timeout, proxies)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, payload: List[Dict[str, str]], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        # Writecream does not support streaming, so yield the full response as a single chunk
+        completion = self._create_non_stream(request_id, created_time, payload, timeout, proxies)
+        content = completion.choices[0].message.content
+        # Yield as a single chunk
+        delta = ChoiceDelta(content=content)
+        choice = Choice(index=0, delta=delta, finish_reason=None)
+        chunk = ChatCompletionChunk(
+            id=request_id,
+            choices=[choice],
+            created=created_time,
+            model="writecream",
+        )
+        yield chunk
+        # Final chunk with finish_reason
+        delta = ChoiceDelta(content=None)
+        choice = Choice(index=0, delta=delta, finish_reason="stop")
+        chunk = ChatCompletionChunk(
+            id=request_id,
+            choices=[choice],
+            created=created_time,
+            model="writecream",
+        )
+        yield chunk
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, payload: List[Dict[str, str]], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        try:
+            params = {
+                "query": json.dumps(payload),
+                "link": "writecream.com"
+            }
+            response = self._client.session.get(
+                self._client.base_url,
+                params=params,
+                headers=self._client.headers,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
+            )
+            response.raise_for_status()
+            data = response.json()
+            # Extract the response content according to the new API format
+            content = data.get("response_content", "")
+            # Estimate tokens
+            prompt_tokens = sum(count_tokens(m.get("content", "")) for m in payload)
+            completion_tokens = count_tokens(content)
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=prompt_tokens + completion_tokens
+            )
+            message = ChatCompletionMessage(role="assistant", content=content)
+            choice = Choice(index=0, message=message, finish_reason="stop")
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model="writecream",
+                usage=usage
+            )
+            return completion
+        except Exception as e:
+            print(f"Error during Writecream request: {e}")
+            raise IOError(f"Writecream request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'Writecream'):
+        self.completions = Completions(client)
+
+class Writecream(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for Writecream API.
+
+    Usage:
+        client = Writecream()
+        response = client.chat.completions.create(
+            messages=[{"role": "system", "content": "You are a helpful assistant."},
+                      {"role": "user", "content": "What is the capital of France?"}]
+        )
+        print(response.choices[0].message.content)
+    """
+    AVAILABLE_MODELS = ["writecream"]
+
+    def __init__(self, browser: str = "chrome"):
+        self.timeout = None
+        self.base_url = "https://8pe3nv3qha.execute-api.us-east-1.amazonaws.com/default/llm_chat"
+        self.session = requests.Session()
+        agent = LitAgent()
+        self.headers = {
+            "User-Agent": agent.random(),
+            "Referer": "https://www.writecream.com/chatgpt-chat/"
+        }
+        self.session.headers.update(self.headers)
+        self.chat = Chat(self)
+
+    def convert_model_name(self, model: str) -> str:
+        return "writecream"
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return Writecream.AVAILABLE_MODELS
+        return _ModelList()
+
+# Simple test if run directly
+if __name__ == "__main__":
+    client = Writecream()
+    response = client.chat.completions.create(
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": "What is the capital of France?"}
+        ]
+    )
+    print(response.choices[0].message.content)
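Both rewrites in this release import count_tokens from .utils, replacing the old whitespace-split token estimates (webscout/Provider/OPENAI/utils.py also changed here, +90 -79, and presumably hosts it). Its implementation is not part of these hunks; the following is only a hypothetical stand-in that mirrors the two call shapes visible in the diffs, a single string (count_tokens(content)) and a list of strings (count_tokens([...])):

from typing import List, Union

# Hypothetical stand-in for the count_tokens helper imported from .utils.
# The real implementation is not shown in this diff and may use an actual
# tokenizer; this sketch only matches how the providers above call it.
def count_tokens(text: Union[str, List[str]]) -> int:
    """Roughly estimate tokens for a string or a list of strings."""
    if isinstance(text, list):
        text = " ".join(text)
    # Crude whitespace count, i.e. the same kind of estimate 8.3 replaces.
    return len(text.split())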
--- a/webscout/Provider/OPENAI/x0gpt.py
+++ b/webscout/Provider/OPENAI/x0gpt.py
@@ -9,7 +9,7 @@ from typing import List, Dict, Optional, Union, Generator, Any
 from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
 from .utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage
+    ChatCompletionMessage, CompletionUsage, count_tokens
 )
 
 # Import LitAgent
@@ -30,6 +30,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -60,12 +62,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())
 
         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
 
     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
@@ -73,7 +75,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
 
             # Handle non-200 responses
@@ -82,15 +85,11 @@ class Completions(BaseCompletions):
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
 
-            # …
-            prompt_tokens = 0
+            # Use count_tokens for prompt tokens
+            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
             completion_tokens = 0
             total_tokens = 0
 
-            # Estimate prompt tokens based on message length
-            for msg in payload.get("messages", []):
-                prompt_tokens += len(msg.get("content", "").split())
-
             for line in response.iter_lines():
                 if line:
                     decoded_line = line.decode('utf-8').strip()
@@ -103,8 +102,8 @@ class Completions(BaseCompletions):
                     # Format the content (replace escaped newlines)
                     content = self._client.format_text(content)
 
-                    # Update token counts
-                    completion_tokens += …
+                    # Update token counts using count_tokens
+                    completion_tokens += count_tokens(content)
                     total_tokens = prompt_tokens + completion_tokens
 
                     # Create the delta object
@@ -131,20 +130,15 @@ class Completions(BaseCompletions):
                         system_fingerprint=None
                     )
 
-                    # …
-
-
-                    # Add usage information to match OpenAI format
-                    usage_dict = {
+                    # Set usage directly on the chunk object
+                    chunk.usage = {
                         "prompt_tokens": prompt_tokens,
                         "completion_tokens": completion_tokens,
                         "total_tokens": total_tokens,
                         "estimated_cost": None
                     }
 
-
-
-                    # Return the chunk object for internal processing
+                    # Return the chunk object with usage information
                     yield chunk
 
             # Final chunk with finish_reason="stop"
@@ -169,8 +163,8 @@ class Completions(BaseCompletions):
                 system_fingerprint=None
             )
 
-            …
-            …
+            # Set usage directly on the chunk object
+            chunk.usage = {
                 "prompt_tokens": prompt_tokens,
                 "completion_tokens": completion_tokens,
                 "total_tokens": total_tokens,
@@ -184,16 +178,16 @@ class Completions(BaseCompletions):
             raise IOError(f"X0GPT request failed: {e}") from e
 
     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
-            # For non-streaming, we still use streaming internally to collect the full response
             response = self._client.session.post(
                 self._client.api_endpoint,
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
 
             # Handle non-200 responses
@@ -214,12 +208,9 @@ class Completions(BaseCompletions):
             # Format the text (replace escaped newlines)
             full_text = self._client.format_text(full_text)
 
-            # …
-            prompt_tokens = 0
-            for msg in payload.get("messages", []):
-                prompt_tokens += len(msg.get("content", "").split())
-
-            completion_tokens = len(full_text.split())
+            # Use count_tokens for accurate token counts
+            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
+            completion_tokens = count_tokens(full_text)
             total_tokens = prompt_tokens + completion_tokens
 
             # Create the message object
@@ -268,26 +259,24 @@ class X0GPT(OpenAICompatibleProvider):
     Usage:
         client = X0GPT()
         response = client.chat.completions.create(
-            model="…
+            model="X0GPT",
             messages=[{"role": "user", "content": "Hello!"}]
         )
     """
 
-    AVAILABLE_MODELS = ["…
+    AVAILABLE_MODELS = ["X0GPT"]
 
     def __init__(
         self,
-        timeout: Optional[int] = None,
         browser: str = "chrome"
     ):
         """
         Initialize the X0GPT client.
 
         Args:
-            timeout: Request timeout in seconds (None for no timeout)
             browser: Browser to emulate in user agent
         """
-        self.timeout = timeout
+        self.timeout = None
         self.api_endpoint = "https://x0-gpt.devwtf.in/api/stream/reply"
         self.session = requests.Session()
         …
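The common thread across both files is that 8.3 moves timeout and proxy handling onto the individual request: create(), _create_stream(), and _create_non_stream() now accept optional timeout and proxies arguments that fall back to the client-level values, and X0GPT's constructor-level timeout parameter is removed. A minimal usage sketch of the new call pattern, assuming the import path implied by the file list above (the proxy URL is a placeholder):

# Sketch of the per-request timeout/proxies pattern introduced in 8.3.
from webscout.Provider.OPENAI.x0gpt import X0GPT

client = X0GPT()  # 8.3: no constructor-level timeout parameter
response = client.chat.completions.create(
    model="X0GPT",
    messages=[{"role": "user", "content": "Hello!"}],
    timeout=30,  # per-request timeout; falls back to client.timeout (None)
    proxies={"https": "http://127.0.0.1:8080"},  # passed through to requests
)
print(response.choices[0].message.content)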