webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +34 -16
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +491 -87
- webscout/Bard.py +441 -323
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +7 -59
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AISEARCH/Perplexity.py +332 -358
- webscout/Provider/AISEARCH/felo_search.py +9 -35
- webscout/Provider/AISEARCH/genspark_search.py +30 -56
- webscout/Provider/AISEARCH/hika_search.py +4 -16
- webscout/Provider/AISEARCH/iask_search.py +410 -436
- webscout/Provider/AISEARCH/monica_search.py +4 -30
- webscout/Provider/AISEARCH/scira_search.py +6 -32
- webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
- webscout/Provider/Blackboxai.py +155 -35
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +339 -339
- webscout/Provider/ExaChat.py +358 -358
- webscout/Provider/Gemini.py +169 -169
- webscout/Provider/GithubChat.py +1 -2
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/HeckAI.py +172 -82
- webscout/Provider/LambdaChat.py +1 -0
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
- webscout/Provider/OPENAI/Cloudflare.py +38 -21
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +35 -18
- webscout/Provider/OPENAI/NEMOTRON.py +34 -34
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +304 -0
- webscout/Provider/OPENAI/README.md +952 -1253
- webscout/Provider/OPENAI/TwoAI.py +374 -0
- webscout/Provider/OPENAI/__init__.py +7 -1
- webscout/Provider/OPENAI/ai4chat.py +73 -63
- webscout/Provider/OPENAI/api.py +869 -644
- webscout/Provider/OPENAI/base.py +2 -0
- webscout/Provider/OPENAI/c4ai.py +34 -13
- webscout/Provider/OPENAI/chatgpt.py +575 -556
- webscout/Provider/OPENAI/chatgptclone.py +512 -487
- webscout/Provider/OPENAI/chatsandbox.py +11 -6
- webscout/Provider/OPENAI/copilot.py +258 -0
- webscout/Provider/OPENAI/deepinfra.py +327 -318
- webscout/Provider/OPENAI/e2b.py +140 -104
- webscout/Provider/OPENAI/exaai.py +420 -411
- webscout/Provider/OPENAI/exachat.py +448 -443
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +12 -8
- webscout/Provider/OPENAI/glider.py +15 -8
- webscout/Provider/OPENAI/groq.py +5 -2
- webscout/Provider/OPENAI/heckai.py +311 -307
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +18 -9
- webscout/Provider/OPENAI/multichat.py +7 -5
- webscout/Provider/OPENAI/netwrck.py +16 -11
- webscout/Provider/OPENAI/oivscode.py +290 -0
- webscout/Provider/OPENAI/opkfc.py +507 -496
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +29 -17
- webscout/Provider/OPENAI/sonus.py +308 -303
- webscout/Provider/OPENAI/standardinput.py +442 -433
- webscout/Provider/OPENAI/textpollinations.py +18 -11
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +17 -10
- webscout/Provider/OPENAI/typegpt.py +21 -11
- webscout/Provider/OPENAI/uncovrAI.py +477 -462
- webscout/Provider/OPENAI/utils.py +90 -79
- webscout/Provider/OPENAI/venice.py +435 -425
- webscout/Provider/OPENAI/wisecat.py +387 -381
- webscout/Provider/OPENAI/writecream.py +166 -163
- webscout/Provider/OPENAI/x0gpt.py +26 -37
- webscout/Provider/OPENAI/yep.py +384 -356
- webscout/Provider/PI.py +2 -1
- webscout/Provider/TTI/README.md +55 -101
- webscout/Provider/TTI/__init__.py +4 -9
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/base.py +159 -159
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TextPollinationsAI.py +308 -308
- webscout/Provider/TwoAI.py +239 -44
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Writecream.py +246 -246
- webscout/Provider/__init__.py +2 -2
- webscout/Provider/ai4chat.py +33 -8
- webscout/Provider/granite.py +41 -6
- webscout/Provider/koala.py +169 -169
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +3 -2
- webscout/Provider/scnet.py +1 -0
- webscout/Provider/typegpt.py +3 -3
- webscout/Provider/uncovr.py +368 -368
- webscout/client.py +70 -0
- webscout/litprinter/__init__.py +58 -58
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +3 -1
- webscout/scout/core/crawler.py +134 -64
- webscout/scout/core/scout.py +148 -109
- webscout/scout/element.py +106 -88
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/plugins/manager.py +9 -2
- webscout/version.py +1 -1
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
- webscout-8.3.dist-info/RECORD +290 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
- webscout/Litlogger/Readme.md +0 -175
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/TTI/AiForce/README.md +0 -159
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/README.md +0 -174
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/README.md +0 -101
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/README.md +0 -155
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/README.md +0 -146
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/aiarta/README.md +0 -134
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/README.md +0 -100
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/README.md +0 -129
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/README.md +0 -114
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/README.md +0 -161
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/README.md +0 -79
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/README.md +0 -139
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/UNFINISHED/oivscode.py +0 -351
- webscout-8.2.8.dist-info/RECORD +0 -334
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/Writecream.py
CHANGED
The single hunk spans the whole file (`@@ -1,246 +1,246 @@`): all 246 lines are removed and re-added with textually identical content, which suggests a whitespace- or line-ending-only rewrite. The resulting file, shown once rather than as duplicated -/+ sides:

```python
from curl_cffi import CurlError
from curl_cffi.requests import Session # Keep Session import
import json
from typing import Any, Dict, Optional, Generator, Union

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent

class Writecream(Provider):
    """
    A class to interact with the Writecream API.
    """

    AVAILABLE_MODELS = ["writecream-gpt"]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        system_prompt: str = "You are a helpful and informative AI assistant.",
        base_url: str = "https://8pe3nv3qha.execute-api.us-east-1.amazonaws.com/default/llm_chat",
        referer: str = "https://www.writecream.com/chatgpt-chat/",
        link: str = "writecream.com",
        model: str = "writecream-gpt"
    ):
        """
        Initializes the Writecream API with given parameters.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        # Initialize curl_cffi Session
        self.session = Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.base_url = base_url
        self.timeout = timeout
        self.last_response = {}
        self.system_prompt = system_prompt
        self.model = model
        # Initialize LitAgent
        self.agent = LitAgent()
        self.referer = referer
        self.link = link

        self.headers = {
            # Use LitAgent for User-Agent
            "User-Agent": self.agent.random(),
            "Referer": self.referer
            # Add other headers if needed by curl_cffi impersonation or API
        }

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        # Update curl_cffi session headers and proxies
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies)

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """
        Sends a message to the Writecream API and returns the response.

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.

        Returns:
            Union[Dict[str, Any], Generator]: Response from the API.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise exceptions.FailedToGenerateResponseError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        final_query = [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": conversation_prompt}
        ]

        params = {
            "query": json.dumps(final_query),
            "link": self.link
        }

        def for_non_stream():
            try:
                # Use curl_cffi session.get with impersonate
                response = self.session.get(
                    self.base_url,
                    params=params,
                    timeout=self.timeout,
                    impersonate="chrome120" # Add impersonate
                )
                response.raise_for_status()
                response_text = response.text # Get the raw text

                # Use sanitize_stream to process the non-streaming text
                # It will try to parse the whole text as JSON because to_json=True
                processed_stream = sanitize_stream(
                    data=response_text,
                    to_json=True, # Attempt to parse the whole response text as JSON
                    intro_value=None, # No prefix expected on the full response
                    content_extractor=lambda chunk: chunk.get("response", chunk.get("response_content", "")) if isinstance(chunk, dict) else None
                )

                # Extract the single result from the generator
                response_content = ""
                for content in processed_stream:
                    response_content = content if isinstance(content, str) else ""

                # Update conversation history
                self.last_response = {"text": response_content}
                self.conversation.update_chat_history(prompt, response_content)

                return {"text": response_content}
            except CurlError as e: # Catch CurlError
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
            except Exception as e:
                # Include original exception type
                raise exceptions.FailedToGenerateResponseError(f"Failed to get response ({type(e).__name__}): {e}")

        # Currently, Writecream API doesn't support streaming, so we always return non-streaming response
        return for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """
        Generates a response from the Writecream API.

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.

        Returns:
            Union[str, Generator[str, None, None]]: Response from the API.
        """
        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    stream=False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        if stream:
            # For compatibility with AUTO streaming interface, yield a dict
            response_dict = self.ask(
                prompt,
                stream=False,
                optimizer=optimizer,
                conversationally=conversationally,
            )
            yield response_dict
        else:
            return for_non_stream()

    def get_message(self, response: dict) -> str:
        """
        Retrieves message only from response.

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]


if __name__ == "__main__":
    # Ensure curl_cffi is installed
    print("-" * 80)
    print(f"{'Model':<30} {'Status':<10} {'Response'}")
    print("-" * 80)

    try:
        test_api = Writecream(timeout=60)
        prompt = "Say 'Hello' in one word"
        response = test_api.chat(prompt)

        if response and len(response.strip()) > 0:
            status = "✓"
            # Clean and truncate response
            clean_text = response.strip().encode('utf-8', errors='ignore').decode('utf-8')
            display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
        else:
            status = "✗"
            display_text = "Empty or invalid response"

        print(f"{test_api.model:<30} {status:<10} {display_text}")
    except Exception as e:
        print(f"{Writecream.AVAILABLE_MODELS[0]:<30} {'✗':<10} {str(e)}")
```
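One caveat worth noting when reading the file above: because `chat()` contains a `yield`, Python treats it as a generator function, so it returns a generator object even when `stream=False` (the file's own `__main__` block then calls `response.strip()` on that generator, so the self-test lands in its `except` branch). A minimal usage sketch that sticks to the reliable `ask()` + `get_message()` path:

```python
# Minimal usage sketch for the Writecream provider shown above.
# Caveat (see lead-in): chat() contains a `yield`, so it is a generator
# function and returns a generator even when stream=False; ask() plus
# get_message() is the dependable non-streaming path in this version.
from webscout.Provider.Writecream import Writecream

bot = Writecream(timeout=60)
raw = bot.ask("Say 'Hello' in one word")  # -> {"text": "..."}
print(bot.get_message(raw))               # prints the extracted text
```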
webscout/Provider/__init__.py
CHANGED
```diff
@@ -42,7 +42,6 @@ from .chatglm import *
 from .hermes import *
 from .TextPollinationsAI import *
 from .Glider import *
-from .ChatGPTGratis import *
 from .QwenLM import *
 from .granite import *
 from .WiseCat import *
@@ -83,8 +82,10 @@ from .FreeGemini import FreeGemini
 from .Flowith import Flowith
 from .samurai import samurai
 from .lmarena import lmarena
+from .oivscode import oivscode
 __all__ = [
     'SCNet',
+    'oivscode',
     'lmarena',
     'NEMOTRON',
     'Flowith',
@@ -109,7 +110,6 @@ __all__ = [
     'WiseCat',
     'IBMGranite',
     'QwenLM',
-    'ChatGPTGratis',
     'LambdaChat',
     'TextPollinationsAI',
     'GliderAI',
```
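The net effect, consistent with the file list above (`ChatGPTGratis.py` deleted, `oivscode.py` added): `oivscode` is now exported from the package namespace and `ChatGPTGratis` is gone. For example:

```python
# Works in 8.3: oivscode is imported in __init__.py and listed in __all__.
from webscout.Provider import oivscode

# Removed in 8.3 (ChatGPTGratis.py was deleted); this now raises ImportError:
# from webscout.Provider import ChatGPTGratis
```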
webscout/Provider/ai4chat.py
CHANGED
```diff
@@ -76,9 +76,10 @@ class AI4Chat(Provider):
         conversationally: bool = False,
         country: str = None,
         user_id: str = None,
-    )
+    ):
         """
         Sends a prompt to the AI4Chat API and returns the response.
+        If stream=True, yields small chunks of the response (simulated streaming).
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -107,9 +108,20 @@ class AI4Chat(Provider):
             response_text = response_text[1:]
         if response_text.endswith('"'):
             response_text = response_text[:-1]
+        response_text = response_text.replace('\\n', '\n').replace('\\n\\n', '\n\n')
         self.last_response.update(dict(text=response_text))
         self.conversation.update_chat_history(prompt, response_text)
-
+        if stream:
+            # Simulate streaming by yielding fixed-size character chunks (e.g., 48 chars)
+            buffer = response_text
+            chunk_size = 48
+            while buffer:
+                chunk = buffer[:chunk_size]
+                buffer = buffer[chunk_size:]
+                if chunk.strip():
+                    yield {"text": chunk}
+        else:
+            return self.last_response
 
     def chat(
         self,
@@ -119,19 +131,31 @@ class AI4Chat(Provider):
         conversationally: bool = False,
         country: str = None,
         user_id: str = None,
-    )
+    ):
         """
         Generates a response from the AI4Chat API.
+        If stream=True, yields each chunk as a string.
         """
-
-        self.ask(
+        if stream:
+            for chunk in self.ask(
                 prompt,
+                stream=True,
                 optimizer=optimizer,
                 conversationally=conversationally,
                 country=country,
                 user_id=user_id,
+            ):
+                yield self.get_message(chunk)
+        else:
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                    country=country,
+                    user_id=user_id,
+                )
             )
-        )
 
     def get_message(self, response: Union[dict, str]) -> str:
         """
@@ -145,5 +169,6 @@ class AI4Chat(Provider):
 if __name__ == "__main__":
     from rich import print
     ai = AI4Chat()
-    response = ai.chat("Tell me
-
+    response = ai.chat("Tell me about humans in points", stream=True)
+    for c in response:
+        print(c, end="")
```
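The "streaming" added here is simulated: the provider buffers the complete response, then slices it into fixed 48-character chunks and yields them (whitespace-only chunks are skipped, so some spacing can be lost at chunk boundaries). A standalone sketch of the same technique; `simulate_stream` is a hypothetical helper, not part of webscout:

```python
# Standalone sketch of the simulated-streaming technique used in the diff
# above: slice an already-complete response into fixed-size chunks.
# `simulate_stream` is a hypothetical name, not a webscout API.
from typing import Generator

def simulate_stream(text: str, chunk_size: int = 48) -> Generator[str, None, None]:
    """Yield `text` in fixed-size chunks, skipping whitespace-only slices."""
    while text:
        chunk, text = text[:chunk_size], text[chunk_size:]
        if chunk.strip():  # mirrors the provider's `if chunk.strip():` guard
            yield chunk

for piece in simulate_stream("A fully buffered reply, replayed as if it were streaming."):
    print(piece, end="")
```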