webscout 8.2.6__py3-none-any.whl → 8.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -239
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
- webscout/Extra/autocoder/autocoder.py +309 -114
- webscout/Extra/autocoder/autocoder_utiles.py +15 -15
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/weather.md +281 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Provider/AISEARCH/DeepFind.py +41 -37
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +0 -1
- webscout/Provider/AISEARCH/genspark_search.py +228 -86
- webscout/Provider/AISEARCH/hika_search.py +11 -11
- webscout/Provider/AISEARCH/scira_search.py +324 -322
- webscout/Provider/AllenAI.py +7 -14
- webscout/Provider/Blackboxai.py +518 -74
- webscout/Provider/Cloudflare.py +0 -1
- webscout/Provider/Deepinfra.py +23 -21
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/GizAI.py +15 -5
- webscout/Provider/Glider.py +11 -8
- webscout/Provider/HeckAI.py +80 -52
- webscout/Provider/Koboldai.py +7 -4
- webscout/Provider/LambdaChat.py +2 -2
- webscout/Provider/Marcus.py +10 -18
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +8 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -286
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +217 -14
- webscout/Provider/OPENAI/c4ai.py +373 -367
- webscout/Provider/OPENAI/chatgpt.py +7 -0
- webscout/Provider/OPENAI/chatgptclone.py +7 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +30 -20
- webscout/Provider/OPENAI/e2b.py +6 -0
- webscout/Provider/OPENAI/exaai.py +7 -0
- webscout/Provider/OPENAI/exachat.py +6 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -352
- webscout/Provider/OPENAI/glider.py +323 -316
- webscout/Provider/OPENAI/groq.py +361 -354
- webscout/Provider/OPENAI/heckai.py +30 -64
- webscout/Provider/OPENAI/llmchatco.py +8 -0
- webscout/Provider/OPENAI/mcpcore.py +7 -0
- webscout/Provider/OPENAI/multichat.py +8 -0
- webscout/Provider/OPENAI/netwrck.py +356 -350
- webscout/Provider/OPENAI/opkfc.py +8 -0
- webscout/Provider/OPENAI/scirachat.py +471 -462
- webscout/Provider/OPENAI/sonus.py +9 -0
- webscout/Provider/OPENAI/standardinput.py +9 -1
- webscout/Provider/OPENAI/textpollinations.py +339 -329
- webscout/Provider/OPENAI/toolbaz.py +7 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -346
- webscout/Provider/OPENAI/uncovrAI.py +7 -0
- webscout/Provider/OPENAI/utils.py +103 -7
- webscout/Provider/OPENAI/venice.py +12 -0
- webscout/Provider/OPENAI/wisecat.py +19 -19
- webscout/Provider/OPENAI/writecream.py +7 -0
- webscout/Provider/OPENAI/x0gpt.py +7 -0
- webscout/Provider/OPENAI/yep.py +50 -21
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/speechma.py +500 -100
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TeachAnything.py +3 -7
- webscout/Provider/TextPollinationsAI.py +4 -2
- webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Writecream.py +11 -2
- webscout/Provider/__init__.py +8 -14
- webscout/Provider/ai4chat.py +4 -58
- webscout/Provider/asksteve.py +17 -9
- webscout/Provider/cerebras.py +3 -1
- webscout/Provider/koala.py +170 -268
- webscout/Provider/llmchat.py +3 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +7 -4
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +4 -2
- webscout/Provider/typefully.py +23 -151
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/scout/README.md +402 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +134 -54
- webscout/zeroart/base.py +19 -13
- webscout/zeroart/effects.py +101 -99
- webscout/zeroart/fonts.py +1239 -816
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.8.dist-info/entry_points.txt +3 -0
- webscout-8.2.8.dist-info/top_level.txt +1 -0
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/ElectronHub.py +0 -773
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -249
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/scout/core.py +0 -881
- webscout-8.2.6.dist-info/entry_points.txt +0 -3
- webscout-8.2.6.dist-info/top_level.txt +0 -2
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/UNFINISHED/oivscode.py
ADDED
@@ -0,0 +1,351 @@
+############################################################
+# NOT WORKING
+############################################################
+
+import requests
+import json
+from typing import Union, Any, Dict, Optional, Generator
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+
+class oivscode(Provider):
+    """
+    A class to interact with a test API.
+    """
+    AVAILABLE_MODELS = [
+        "deepseek/deepseek-chat",
+        "claude-3-5-haiku-20241022",
+        "gpt-4o-mini",
+        "claude-3-5-sonnet-20240620",
+        "ours/deepseek-chat",
+        "custom/deepseek",
+        "Qwen/Qwen2.5-72B-Instruct-Turbo",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "claude-3-5-sonnet-20241022",
+        "omni-moderation-latest",
+        "omni-moderation-latest-intents",
+        "omni-moderation-2024-09-26",
+        "gpt-4",
+        "gpt-4o",
+        "gpt-4o-audio-preview",
+        "gpt-4o-audio-preview-2024-12-17",
+        "gpt-4o-audio-preview-2024-10-01",
+        "gpt-4o-mini-audio-preview-2024-12-17",
+        "gpt-4o-mini",
+        "gpt-4o-mini-2024-07-18",
+        "o1",
+        "o1-mini",
+        "o1-mini-2024-09-12",
+        "o1-preview",
+        "o1-preview-2024-09-12",
+        "o1-2024-12-17",
+        "chatgpt-4o-latest",
+        "gpt-4o-2024-05-13",
+        "gpt-4o-2024-08-06",
+        "gpt-4o-2024-11-20",
+        "gpt-4o-realtime-preview-2024-10-01",
+        "gpt-4o-realtime-preview",
+        "gpt-4o-realtime-preview-2024-12-17",
+        "gpt-4o-mini-realtime-preview",
+        "gpt-4o-mini-realtime-preview-2024-12-17",
+        "gpt-4-turbo-preview",
+        "gpt-4-0314",
+        "gpt-4-0613",
+        "gpt-4-32k",
+        "gpt-4-32k-0314",
+        "gpt-4-32k-0613",
+        "gpt-4-turbo",
+        "gpt-4-turbo-2024-04-09",
+        "gpt-4-1106-preview",
+        "gpt-4-0125-preview",
+        "gpt-4-vision-preview",
+        "gpt-4-1106-vision-preview",
+        "gpt-3.5-turbo",
+        "gpt-3.5-turbo-0301",
+        "gpt-3.5-turbo-0613",
+        "gpt-3.5-turbo-1106",
+        "gpt-3.5-turbo-0125",
+        "gpt-3.5-turbo-16k",
+        "gpt-3.5-turbo-16k-0613",
+        "text-embedding-3-large",
+        "text-embedding-3-small",
+        "text-embedding-ada-002",
+        "text-embedding-ada-002-v2",
+        "text-moderation-stable",
+        "text-moderation-007",
+        "text-moderation-latest",
+        "256-x-256/dall-e-2",
+        "512-x-512/dall-e-2",
+        "1024-x-1024/dall-e-2",
+        "hd/1024-x-1792/dall-e-3",
+        "hd/1792-x-1024/dall-e-3",
+        "hd/1024-x-1024/dall-e-3",
+        "standard/1024-x-1792/dall-e-3",
+        "standard/1792-x-1024/dall-e-3",
+        "standard/1024-x-1024/dall-e-3",
+        "whisper-1",
+        "tts-1",
+        "tts-1-hd",
+        "ft:davinci-002",
+        "ft:babbage-002",
+        "babbage-002",
+        "davinci-002",
+        "gpt-3.5-turbo-instruct",
+        "gpt-3.5-turbo-instruct-0914",
+        "claude-instant-1",
+        "claude-instant-1.2",
+        "claude-2",
+        "claude-2.1",
+        "claude-3-haiku-20240307",
+        "claude-3-5-haiku-20241022",
+        "claude-3-opus-20240229",
+        "claude-3-sonnet-20240229",
+        "claude-3-5-sonnet-20240620",
+        "claude-3-5-sonnet-20241022",
+        "togethercomputer/llama-2-70b-chat",
+        "togethercomputer/llama-2-70b",
+        "togethercomputer/LLaMA-2-7B-32K",
+        "togethercomputer/Llama-2-7B-32K-Instruct",
+        "togethercomputer/llama-2-7b",
+        "togethercomputer/falcon-40b-instruct",
+        "togethercomputer/falcon-7b-instruct",
+        "togethercomputer/alpaca-7b",
+        "HuggingFaceH4/starchat-alpha",
+        "togethercomputer/CodeLlama-34b",
+        "togethercomputer/CodeLlama-34b-Instruct",
+        "togethercomputer/CodeLlama-34b-Python",
+        "defog/sqlcoder",
+        "NumbersStation/nsql-llama-2-7B",
+        "WizardLM/WizardCoder-15B-V1.0",
+        "WizardLM/WizardCoder-Python-34B-V1.0",
+        "NousResearch/Nous-Hermes-Llama2-13b",
+        "Austism/chronos-hermes-13b",
+        "upstage/SOLAR-0-70b-16bit",
+        "WizardLM/WizardLM-70B-V1.0",
+        "deepseek/deepseek-chat",
+        "deepseek/deepseek-coder",
+        "fireworks_ai/accounts/fireworks/models/llama-v3p2-1b-instruct",
+        "fireworks_ai/accounts/fireworks/models/llama-v3p2-3b-instruct",
+        "fireworks_ai/accounts/fireworks/models/llama-v3p2-11b-vision-instruct",
+        "accounts/fireworks/models/llama-v3p2-90b-vision-instruct",
+        "fireworks_ai/accounts/fireworks/models/firefunction-v2",
+        "fireworks_ai/accounts/fireworks/models/mixtral-8x22b-instruct-hf",
+        "fireworks_ai/accounts/fireworks/models/qwen2-72b-instruct",
+        "fireworks_ai/accounts/fireworks/models/qwen2p5-coder-32b-instruct",
+        "fireworks_ai/accounts/fireworks/models/yi-large",
+        "fireworks_ai/accounts/fireworks/models/deepseek-coder-v2-instruct",
+        "fireworks_ai/accounts/fireworks/models/deepseek-v3",
+        "fireworks_ai/nomic-ai/nomic-embed-text-v1.5",
+        "fireworks_ai/nomic-ai/nomic-embed-text-v1",
+        "fireworks_ai/WhereIsAI/UAE-Large-V1",
+        "fireworks_ai/thenlper/gte-large",
+        "fireworks_ai/thenlper/gte-base",
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 1024,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "claude-3-5-sonnet-20240620",
+        system_prompt: str = "You are a helpful AI assistant.",
+
+    ):
+        """
+        Initializes the oivscode with given parameters.
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://oi-vscode-server.onrender.com/v1/chat/completions"
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9,en-GB;q=0.8,en-IN;q=0.7",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "pragma": "no-cache",
+            "priority": "u=1, i",
+            "sec-ch-ua": '"Not A(Brand";v="8", "Chromium";v="132", "Microsoft Edge";v="132"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-site"
+        }
+
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict or generator:
+            If stream is False, returns a dict
+            If stream is True, returns a generator
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "model": self.model,
+            "stream": stream,
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ]
+        }
+
+        def for_stream():
+            response = self.session.post(
+                self.api_endpoint, json=payload, stream=True, timeout=self.timeout
+            )
+            if not response.ok:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            from rich import print
+            print(response.text)
+            message_load = ""
+            for value in response.iter_lines(
+                decode_unicode=True,
+                delimiter="" if raw else "data:",
+                chunk_size=64,
+            ):
+                try:
+                    resp = json.loads(value)
+                    incomplete_message = self.get_message(resp)
+                    if incomplete_message:
+                        message_load += incomplete_message
+                        resp["choices"][0]["delta"]["content"] = message_load
+                        self.last_response.update(resp)
+                        yield value if raw else resp
+                    elif raw:
+                        yield value
+                except json.decoder.JSONDecodeError:
+                    pass
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+
+        def for_non_stream():
+            response = self.session.post(
+                self.chat_endpoint, json=payload, stream=False, timeout=self.timeout
+            )
+            if (
+                not response.ok
+                or not response.headers.get("Content-Type", "") == "application/json"
+            ):
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                )
+            resp = response.json()
+            self.last_response.update(resp)
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+            return resp
+
+        return for_stream() if stream else for_non_stream()
+
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response"""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    from rich import print
+    chatbot = oivscode()
+    response = chatbot.chat(input(">>> "), stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
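The `for_stream()` helper above implements the usual pattern for consuming an OpenAI-compatible streaming endpoint: split the body on `data:` delimiters, JSON-decode each chunk, and accumulate the delta content. A minimal standalone sketch of that pattern follows, using the endpoint hard-coded in the file; since the file is flagged NOT WORKING, treat it as illustrative only.

import json
import requests

# Endpoint copied from the file above; a live response is not guaranteed.
url = "https://oi-vscode-server.onrender.com/v1/chat/completions"
payload = {
    "model": "gpt-4o-mini",  # any entry from AVAILABLE_MODELS
    "stream": True,
    "messages": [{"role": "user", "content": "Hello"}],
}

with requests.post(url, json=payload, stream=True, timeout=30) as response:
    response.raise_for_status()
    for line in response.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data:"):
            continue
        chunk = line[len("data:"):].strip()
        if chunk == "[DONE]":  # conventional end-of-stream sentinel
            break
        try:
            delta = json.loads(chunk)["choices"][0]["delta"]
        except (json.JSONDecodeError, KeyError, IndexError):
            continue  # skip keep-alives and malformed chunks
        print(delta.get("content", ""), end="", flush=True)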
webscout/Provider/UNFINISHED/test_lmarena.py
ADDED
@@ -0,0 +1,119 @@
+import cloudscraper
+
+def main():
+    print("Testing cloudscraper access to LMArena...")
+    try:
+        scraper = cloudscraper.create_scraper(browser={
+            'browser': 'chrome',
+            'platform': 'windows',
+            'desktop': True
+        })
+
+        # Test basic access
+        response = scraper.get("https://lmarena.ai")
+        print(f"Status code: {response.status_code}")
+        print(f"Response length: {len(response.text)}")
+        print("Cloudscraper test successful!")
+
+        # Generate a session hash
+        import uuid
+        session_hash = str(uuid.uuid4()).replace("-", "")
+        print(f"Session hash: {session_hash}")
+
+        # Create payloads
+        model_id = "gpt-4o"
+        prompt = "Hello, what is your name?"
+
+        first_payload = {
+            "data": [
+                None,
+                model_id,
+                {"text": prompt, "files": []},
+                {
+                    "text_models": [model_id],
+                    "all_text_models": [model_id],
+                    "vision_models": [],
+                    "all_vision_models": [],
+                    "image_gen_models": [],
+                    "all_image_gen_models": [],
+                    "search_models": [],
+                    "all_search_models": [],
+                    "models": [model_id],
+                    "all_models": [model_id],
+                    "arena_type": "text-arena"
+                }
+            ],
+            "event_data": None,
+            "fn_index": 117,
+            "trigger_id": 159,
+            "session_hash": session_hash
+        }
+
+        second_payload = {
+            "data": [],
+            "event_data": None,
+            "fn_index": 118,
+            "trigger_id": 159,
+            "session_hash": session_hash
+        }
+
+        third_payload = {
+            "data": [None, 0.7, 1, 2048],
+            "event_data": None,
+            "fn_index": 119,
+            "trigger_id": 159,
+            "session_hash": session_hash
+        }
+
+        # Set up headers
+        headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json",
+        }
+
+        # Make requests
+        print("Sending first request...")
+        response = scraper.post(
+            "https://lmarena.ai/queue/join?",
+            json=first_payload,
+            headers=headers
+        )
+        print(f"First response status: {response.status_code}")
+
+        print("Sending second request...")
+        response = scraper.post(
+            "https://lmarena.ai/queue/join?",
+            json=second_payload,
+            headers=headers
+        )
+        print(f"Second response status: {response.status_code}")
+
+        print("Sending third request...")
+        response = scraper.post(
+            "https://lmarena.ai/queue/join?",
+            json=third_payload,
+            headers=headers
+        )
+        print(f"Third response status: {response.status_code}")
+
+        # Stream the response
+        stream_url = f"https://lmarena.ai/queue/data?session_hash={session_hash}"
+        print(f"Streaming from: {stream_url}")
+
+        with scraper.get(stream_url, headers={"Accept": "text/event-stream"}, stream=True) as response:
+            print(f"Stream response status: {response.status_code}")
+            text_position = 0
+            response_text = ""
+
+            for line in response.iter_lines(decode_unicode=True):
+                if line:
+                    print(line)
+
+
+    except Exception as e:
+        print(f"Error: {e}")
+        import traceback
+        traceback.print_exc()
+
+if __name__ == "__main__":
+    main()
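The three `queue/join` POSTs and the `queue/data` GET above follow Gradio's queue protocol: each join registers one step of the page's event chain (identified by `fn_index`) under a shared `session_hash`, and the server-sent-event stream then reports progress for that session. The script only prints raw lines; below is a hedged sketch of decoding them, where the `msg` values are Gradio's and may differ across Gradio versions.

import json

def iter_queue_events(lines):
    """Yield (msg_type, event_dict) pairs from an SSE line iterator."""
    for line in lines:
        if not line or not line.startswith("data:"):
            continue
        event = json.loads(line[len("data:"):].strip())
        yield event.get("msg"), event

# Inside main(), instead of printing raw lines:
# for msg, event in iter_queue_events(response.iter_lines(decode_unicode=True)):
#     if msg == "process_completed":  # final payload for this session_hash
#         print(event.get("output", {}).get("data"))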
webscout/Provider/Writecream.py
CHANGED
@@ -195,8 +195,17 @@ class Writecream(Provider):
             )
         )
 
-
-
+        if stream:
+            # For compatibility with AUTO streaming interface, yield a dict
+            response_dict = self.ask(
+                prompt,
+                stream=False,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+            yield response_dict
+        else:
+            return for_non_stream()
 
     def get_message(self, response: dict) -> str:
         """
webscout/Provider/__init__.py
CHANGED
@@ -20,22 +20,16 @@ from .Llama3 import *
 from .koala import *
 from .meta import *
 from .julius import *
-from .Youchat import *
 from .yep import *
 from .Cloudflare import *
 from .turboseek import *
-from .Free2GPT import *
 from .TeachAnything import *
 from .AI21 import *
 from .x0gpt import *
 from .cerebras import *
 from .geminiapi import *
 from .elmo import *
-from .GPTWeb import *
 from .Netwrck import Netwrck
-from .bagoodex import *
-from .aimathgpt import *
-from .geminiprorealtime import *
 from .llmchat import *
 from .llmchatco import LLMChatCo  # Add new LLMChat.co provider
 from .talkai import *
@@ -59,7 +53,6 @@ from .AllenAI import *
 from .HeckAI import *
 from .TwoAI import *
 from .Venice import *
-from .ElectronHub import *
 from .HuggingFaceChat import *
 from .GithubChat import *
 from .copilot import *
@@ -86,9 +79,17 @@ from .ChatSandbox import ChatSandbox
 from .GizAI import GizAI
 from .WrDoChat import WrDoChat
 from .Nemotron import NEMOTRON
+from .FreeGemini import FreeGemini
+from .Flowith import Flowith
+from .samurai import samurai
+from .lmarena import lmarena
 __all__ = [
     'SCNet',
+    'lmarena',
     'NEMOTRON',
+    'Flowith',
+    'samurai',
+    'FreeGemini',
     'WrDoChat',
     'GizAI',
     'ChatSandbox',
@@ -131,7 +132,6 @@ __all__ = [
     'Meta',
     'PiAI',
     'Julius',
-    'YouChat',
     'YEPCHAT',
     'Cloudflare',
     'TurboSeek',
@@ -145,12 +145,7 @@ __all__ = [
     'Elmo',
     'ChatGPTClone',
     'TypefullyAI',
-    'Free2GPT',
-    'GPTWeb',
     'Netwrck',
-    'Bagoodex',
-    'AIMathGPT',
-    'GeminiPro',
     'LLMChat',
     'LLMChatCo',
     'Talkai',
@@ -163,7 +158,6 @@ __all__ = [
     'ChatGLM',
     'NousHermes',
     'FreeAIChat',
-    'ElectronHub',
     'GithubChat',
     'UncovrAI',
     'VercelAI',
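After these hunks the four new providers are exposed at package level; a quick import check, grounded in the names this diff adds:

from webscout.Provider import FreeGemini, Flowith, samurai, lmarena

import webscout.Provider as Provider
assert {"FreeGemini", "Flowith", "samurai", "lmarena"} <= set(Provider.__all__)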
webscout/Provider/ai4chat.py
CHANGED
@@ -27,23 +27,6 @@ class AI4Chat(Provider):
         country: str = "Asia",
         user_id: str = "usersmjb2oaz7y"
     ) -> None:
-        """
-        Initializes the AI4Chat API with given parameters.
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt to guide the AI's behavior. Defaults to "You are a helpful and informative AI assistant.".
-            country (str, optional): Country parameter for API. Defaults to "Asia".
-            user_id (str, optional): User ID for API. Defaults to "usersmjb2oaz7y".
-        """
         self.session = Session(timeout=timeout, proxies=proxies)
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -66,10 +49,8 @@ class AI4Chat(Provider):
             "Sec-Fetch-Site": "cross-site",
             "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Mobile Safari/537.36"
         }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
+        self.__available_optimizers = tuple(
+            method for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
         self.session.headers.update(self.headers)
@@ -98,18 +79,6 @@ class AI4Chat(Provider):
     ) -> Dict[str, Any]:
         """
        Sends a prompt to the AI4Chat API and returns the response.
-
-        Args:
-            prompt: The text prompt to generate text from.
-            stream (bool, optional): Not supported. Defaults to False.
-            raw (bool, optional): Whether to return the raw response. Defaults to False.
-            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
-            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
-            country (str, optional): Country parameter for API. Defaults to None.
-            user_id (str, optional): User ID for API. Defaults to None.
-
-        Returns:
-            dict: A dictionary containing the AI's response.
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -121,30 +90,23 @@ class AI4Chat(Provider):
                 raise Exception(
                     f"Optimizer is not one of {self.__available_optimizers}"
                 )
-
         country_param = country or self.country
         user_id_param = user_id or self.user_id
-
         encoded_text = urllib.parse.quote(conversation_prompt)
         encoded_country = urllib.parse.quote(country_param)
         encoded_user_id = urllib.parse.quote(user_id_param)
-
         url = f"{self.api_endpoint}?text={encoded_text}&country={encoded_country}&user_id={encoded_user_id}"
-
         try:
             response = self.session.get(url, headers=self.headers, timeout=self.timeout)
         except RequestsError as e:
            raise Exception(f"Failed to generate response: {e}")
         if not response.ok:
            raise Exception(f"Failed to generate response: {response.status_code} - {response.reason}")
-
         response_text = response.text
-
         if response_text.startswith('"'):
             response_text = response_text[1:]
         if response_text.endswith('"'):
             response_text = response_text[:-1]
-
         self.last_response.update(dict(text=response_text))
         self.conversation.update_chat_history(prompt, response_text)
         return self.last_response
@@ -160,17 +122,6 @@ class AI4Chat(Provider):
     ) -> str:
         """
         Generates a response from the AI4Chat API.
-
-        Args:
-            prompt (str): The prompt to send to the AI.
-            stream (bool, optional): Not supported.
-            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
-            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
-            country (str, optional): Country parameter for API. Defaults to None.
-            user_id (str, optional): User ID for API. Defaults to None.
-
-        Returns:
-            str: The response generated by the AI.
         """
         return self.get_message(
             self.ask(
@@ -183,13 +134,8 @@ class AI4Chat(Provider):
         )
 
     def get_message(self, response: Union[dict, str]) -> str:
-        """
-
-        Args:
-            response (Union[dict, str]): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
+        """
+        Retrieves message only from response
         """
         if isinstance(response, str):
             return response.replace('\\n', '\n').replace('\\n\\n', '\n\n')
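For reference, a minimal usage sketch of the slimmed-down provider, using only names visible in this hunk (`AI4Chat`, `ask`, `chat`, `get_message`) and the defaults from its signature; whether the upstream endpoint still responds is not guaranteed:

from webscout.Provider.ai4chat import AI4Chat

bot = AI4Chat()  # country="Asia", user_id="usersmjb2oaz7y" by default
reply = bot.chat("What is the capital of France?")
print(reply)  # get_message() has already unescaped literal \n sequences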