webscout 8.2.6__py3-none-any.whl → 8.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -239
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
- webscout/Extra/autocoder/autocoder.py +309 -114
- webscout/Extra/autocoder/autocoder_utiles.py +15 -15
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/weather.md +281 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Provider/AISEARCH/DeepFind.py +41 -37
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +0 -1
- webscout/Provider/AISEARCH/genspark_search.py +228 -86
- webscout/Provider/AISEARCH/hika_search.py +11 -11
- webscout/Provider/AISEARCH/scira_search.py +324 -322
- webscout/Provider/AllenAI.py +7 -14
- webscout/Provider/Blackboxai.py +518 -74
- webscout/Provider/Cloudflare.py +0 -1
- webscout/Provider/Deepinfra.py +23 -21
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/GizAI.py +15 -5
- webscout/Provider/Glider.py +11 -8
- webscout/Provider/HeckAI.py +80 -52
- webscout/Provider/Koboldai.py +7 -4
- webscout/Provider/LambdaChat.py +2 -2
- webscout/Provider/Marcus.py +10 -18
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +8 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -286
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +217 -14
- webscout/Provider/OPENAI/c4ai.py +373 -367
- webscout/Provider/OPENAI/chatgpt.py +7 -0
- webscout/Provider/OPENAI/chatgptclone.py +7 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +30 -20
- webscout/Provider/OPENAI/e2b.py +6 -0
- webscout/Provider/OPENAI/exaai.py +7 -0
- webscout/Provider/OPENAI/exachat.py +6 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -352
- webscout/Provider/OPENAI/glider.py +323 -316
- webscout/Provider/OPENAI/groq.py +361 -354
- webscout/Provider/OPENAI/heckai.py +30 -64
- webscout/Provider/OPENAI/llmchatco.py +8 -0
- webscout/Provider/OPENAI/mcpcore.py +7 -0
- webscout/Provider/OPENAI/multichat.py +8 -0
- webscout/Provider/OPENAI/netwrck.py +356 -350
- webscout/Provider/OPENAI/opkfc.py +8 -0
- webscout/Provider/OPENAI/scirachat.py +471 -462
- webscout/Provider/OPENAI/sonus.py +9 -0
- webscout/Provider/OPENAI/standardinput.py +9 -1
- webscout/Provider/OPENAI/textpollinations.py +339 -329
- webscout/Provider/OPENAI/toolbaz.py +7 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -346
- webscout/Provider/OPENAI/uncovrAI.py +7 -0
- webscout/Provider/OPENAI/utils.py +103 -7
- webscout/Provider/OPENAI/venice.py +12 -0
- webscout/Provider/OPENAI/wisecat.py +19 -19
- webscout/Provider/OPENAI/writecream.py +7 -0
- webscout/Provider/OPENAI/x0gpt.py +7 -0
- webscout/Provider/OPENAI/yep.py +50 -21
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/speechma.py +500 -100
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TeachAnything.py +3 -7
- webscout/Provider/TextPollinationsAI.py +4 -2
- webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Writecream.py +11 -2
- webscout/Provider/__init__.py +8 -14
- webscout/Provider/ai4chat.py +4 -58
- webscout/Provider/asksteve.py +17 -9
- webscout/Provider/cerebras.py +3 -1
- webscout/Provider/koala.py +170 -268
- webscout/Provider/llmchat.py +3 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +7 -4
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +4 -2
- webscout/Provider/typefully.py +23 -151
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/scout/README.md +402 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +134 -54
- webscout/zeroart/base.py +19 -13
- webscout/zeroart/effects.py +101 -99
- webscout/zeroart/fonts.py +1239 -816
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.8.dist-info/entry_points.txt +3 -0
- webscout-8.2.8.dist-info/top_level.txt +1 -0
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/ElectronHub.py +0 -773
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -249
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/scout/core.py +0 -881
- webscout-8.2.6.dist-info/entry_points.txt +0 -3
- webscout-8.2.6.dist-info/top_level.txt +0 -2
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
webscout/Provider/ElectronHub.py
DELETED
@@ -1,773 +0,0 @@
-from curl_cffi import CurlError
-from curl_cffi.requests import Session
-import json
-import os
-from typing import Any, Dict, Optional, Generator, Union, List
-
-import requests
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider, AsyncProvider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-
-class ElectronHub(Provider):
-    """
-    A class to interact with the ElectronHub API with LitAgent user-agent.
-    """
-
-    # Default models list (will be updated dynamically)
-    AVAILABLE_MODELS = [
-        # OpenAI GPT models
-        "gpt-3.5-turbo",
-        "gpt-3.5-turbo-16k",
-        "gpt-3.5-turbo-1106",
-        "gpt-3.5-turbo-0125",
-        "gpt-4",
-        "gpt-4-turbo",
-        "gpt-4-turbo-preview",
-        "gpt-4-0125-preview",
-        "gpt-4-1106-preview",
-        "gpt-4o",
-        "gpt-4o-2024-05-13",
-        "gpt-4o-2024-08-06",
-        "gpt-4o-2024-11-20",
-        "gpt-4o-search-preview",
-        "gpt-4o-search-preview-2025-03-11",
-        "gpt-4o-mini",
-        "gpt-4o-mini-2024-07-18",
-        "gpt-4o-mini-search-preview",
-        "gpt-4o-mini-search-preview-2025-03-11",
-        "chatgpt-4o-latest",
-        "gpt-4.5-preview",
-        "gpt-4.5-preview-2025-02-27",
-        "o1-mini",
-        "o1-preview",
-        "o1",
-        "o1-low",
-        "o1-high",
-        "o3-mini",
-        "o3-mini-low",
-        "o3-mini-high",
-        "o3-mini-online",
-
-        # Anthropic Claude models
-        "claude-2",
-        "claude-2.1",
-        "claude-3-haiku-20240307",
-        "claude-3-5-haiku-20241022",
-        "claude-3-opus-20240229",
-        "claude-3-sonnet-20240229",
-        "claude-3-5-sonnet-20240620",
-        "claude-3-5-sonnet-20241022",
-        "claude-3-7-sonnet-20250219",
-        "claude-3-7-sonnet-20250219-thinking",
-        "claude-3-opus-20240229:safe",
-        "claude-3-sonnet-20240229:safe",
-        "claude-3-5-sonnet-20240620:safe",
-        "claude-3-5-sonnet-20241022:safe",
-        "claude-3-7-sonnet-20250219:safe",
-        "claude-3-7-sonnet-20250219-thinking:safe",
-
-        # Google Gemini models
-        "gemini-1.0-pro",
-        "gemini-1.0-pro-vision",
-        "gemini-1.5-pro",
-        "gemini-1.5-pro-latest",
-        "gemini-1.5-flash-8b",
-        "gemini-1.5-flash",
-        "gemini-1.5-flash-latest",
-        "gemini-1.5-flash-exp",
-        "gemini-1.5-flash-online",
-        "gemini-exp-1206",
-        "learnlm-1.5-pro-experimental",
-        "gemini-2.0-flash-001",
-        "gemini-2.0-flash-exp",
-        "gemini-2.0-flash-thinking-exp",
-        "gemini-2.0-flash-thinking-exp-1219",
-        "gemini-2.0-flash-thinking-exp-01-21",
-        "gemini-2.0-flash-lite-preview-02-05",
-        "gemini-2.0-flash-lite-001",
-        "gemini-2.0-pro-exp-02-05",
-        "gemini-2.5-pro-exp-03-25",
-
-        # Google PaLM models
-        "palm-2-chat-bison",
-        "palm-2-codechat-bison",
-        "palm-2-chat-bison-32k",
-        "palm-2-codechat-bison-32k",
-
-        # Meta Llama models
-        "llama-2-13b-chat-awq",
-        "llama-2-7b-chat-fp16",
-        "llama-2-7b-chat-int8",
-        "llama-2-70b-chat",
-        "llama-3-8b-instruct",
-        "llama-3-8b-instruct-awq",
-        "llama-3-70b",
-        "llama-3.1-8b-instruct",
-        "llama-3.1-8b-instruct-awq",
-        "llama-3.1-8b-instruct-fp8",
-        "llama-3.1-70b",
-        "llama-3.1-405b",
-        "llama-3.2-11b-vision-instruct",
-        "llama-3.2-1b-instruct",
-        "llama-3.2-3b-instruct",
-        "llama-3.2-90b",
-        "llama-3.3-70b-instruct-fp8-fast",
-        "llama-guard-3-8b",
-        "llamaguard-7b-awq",
-        "meta-llama-3-8b-instruct",
-        "llama-3.1-nemotron-70b-instruct",
-        "llama-3.1-tulu-3-70b",
-        "llama-3.1-tulu-3-405b",
-        "llama-3.1-sonar-small-128k-online",
-        "llama-3.1-sonar-large-128k-online",
-        "llama-3.1-sonar-huge-128k-online",
-        "llama-3.1-sonar-small-128k-chat",
-        "llama-3.1-sonar-large-128k-chat",
-        "llama-3.1-swallow-70b-instruct-v0.3",
-        "llama-3.1-8b-lexi-uncensored-v2",
-        "llama-3.1-lumimaid-8b",
-        "llama-3.1-lumimaid-70b",
-        "llama3-openbiollm-70b",
-
-        # Mistral models
-        "mistral-7b-instruct-v0.1",
-        "mistral-7b-instruct-v0.1-awq",
-        "mistral-7b-instruct-v0.2",
-        "mistral-tiny-latest",
-        "mistral-tiny",
-        "mistral-tiny-2312",
-        "mistral-tiny-2407",
-        "mistral-small-3.1-24b-instruct",
-        "mistral-small-24b-instruct-2501",
-        "mistral-small-latest",
-        "mistral-small",
-        "mistral-small-2312",
-        "mistral-small-2402",
-        "mistral-small-2409",
-        "mistral-medium-latest",
-        "mistral-medium",
-        "mistral-medium-2312",
-        "mistral-large-latest",
-        "mistral-large-2411",
-        "mistral-large-2407",
-        "mistral-large-2402",
-        "open-mistral-nemo",
-        "open-mistral-nemo-2407",
-        "open-mixtral-8x22b-2404",
-        "open-mixtral-8x7b",
-
-        # Codestral models
-        "codestral-mamba",
-        "codestral-latest",
-        "codestral-2405",
-        "codestral-2412",
-        "codestral-2501",
-
-        # Ministral models
-        "ministral-3b",
-        "ministral-3b-2410",
-        "ministral-8b",
-        "ministral-8b-2410",
-
-        # Mistral Saba models
-        "mistral-saba-latest",
-        "mistral-saba-2502",
-
-        # Mixtral models
-        "mixtral-8x7b",
-        "mixtral-8x22b",
-
-        # DeepSeek models
-        "deepseek-coder",
-        "deepseek-coder-6.7b-base-awq",
-        "deepseek-coder-6.7b-instruct-awq",
-        "deepseek-llm-67b-chat",
-        "deepseek-math-7b-instruct",
-        "deepseek-r1",
-        "deepseek-r1-distill-llama-70b",
-        "deepseek-r1-distill-llama-8b",
-        "deepseek-r1-distill-qwen-1.5b",
-        "deepseek-r1-distill-qwen-14b",
-        "deepseek-r1-distill-qwen-32b",
-        "deepseek-r1-distill-qwen-7b",
-        "deepseek-r1-nitro",
-        "deepseek-r1-zero",
-        "deepseek-v2.5",
-        "deepseek-v3",
-        "deepseek-v3-0324",
-        "deepseek-vl2",
-
-        # Qwen models
-        "qwen-1.5-0.5b-chat",
-        "qwen-1.5-1.8b-chat",
-        "qwen-1.5-14b-chat-awq",
-        "qwen-1.5-7b-chat-awq",
-        "qwen-2-7b-instruct",
-        "qwen-2-72b-instruct",
-        "qwen-2-vl-7b-instruct",
-        "qwen-2-vl-72b-instruct",
-        "qwen-2.5-7b-instruct",
-        "qwen-2.5-32b-instruct",
-        "qwen-2.5-72b-instruct",
-        "qwen-2.5-coder-32b-instruct",
-        "qwq-32b-preview",
-        "qwq-32b",
-        "qwen-vl-plus",
-        "qwen2.5-vl-3b-instruct",
-        "qwen2.5-vl-7b-instruct",
-        "qwen2.5-vl-72b-instruct",
-        "qwen-turbo",
-        "qwen-plus",
-        "qwen-max",
-
-        # F1 models
-        "f1-mini-preview",
-        "f1-preview",
-
-        # Command models
-        "command",
-        "command-light",
-        "command-nightly",
-        "command-light-nightly",
-        "command-r",
-        "command-r-03-2024",
-        "command-r-08-2024",
-        "command-r-plus",
-        "command-r-plus-04-2024",
-        "command-r-plus-08-2024",
-        "command-r7b-12-2024",
-        "command-a-03-2025",
-
-        # Dolphin models
-        "dolphin-mixtral-8x7b",
-        "dolphin-mixtral-8x22b",
-        "dolphin3.0-mistral-24b",
-        "dolphin3.0-r1-mistral-24b",
-
-        # Cohere models
-        "c4ai-aya-expanse-8b",
-        "c4ai-aya-expanse-32b",
-
-        # Reka models
-        "reka-flash",
-        "reka-core",
-        "reka-flash-3",
-
-        # OpenChat models
-        "openchat-3.5-0106",
-        "openchat-3.5-7b",
-        "openchat-3.6-8b",
-
-        # Yi models
-        "yi-34b-chat-200k",
-        "yi-large",
-        "yi-large-rag",
-        "yi-large-turbo",
-        "yi-medium",
-        "yi-vl-plus",
-
-        # Phi models
-        "phi-2",
-        "phi-3-mini-128k-instruct",
-        "phi-3-medium-128k-instruct",
-        "phi-3.5-mini-128k-instruct",
-        "phi-4",
-        "phi-4-multimodal-instruct",
-
-        # Claude models by AION-LABS
-        "aion-1.0",
-        "aion-1.0-mini",
-        "aion-rp-llama-3.1-8b",
-
-        # Other AI models
-        "nemotron-4-340b",
-        "pixtral-large-2411",
-        "pixtral-12b",
-        "dbrx-instruct",
-        "grok-2",
-        "grok-2-mini",
-        "grok-beta",
-        "grok-vision-beta",
-        "grok-2-1212",
-        "grok-2-vision-1212",
-        "grok-3-early",
-        "grok-3-preview-02-24",
-        "r1-1776",
-        "sonar-deep-research",
-        "sonar-reasoning-pro",
-        "sonar-reasoning",
-        "sonar-pro",
-        "sonar",
-        "wizardlm-2-7b",
-        "wizardlm-2-8x22b",
-        "minimax-01",
-        "jamba-1.5-large",
-        "jamba-1.5-mini",
-        "jamba-1.6-large",
-        "jamba-1.6-mini",
-        "jamba-instruct",
-
-        # Chinese language models
-        "doubao-lite-4k",
-        "doubao-lite-32k",
-        "doubao-pro-4k",
-        "doubao-pro-32k",
-        "ui-tars-72b-dpo",
-        "ernie-lite-8k",
-        "ernie-tiny-8k",
-        "ernie-speed-8k",
-        "ernie-speed-128k",
-        "hunyuan-lite",
-        "hunyuan-standard-2025-02-10",
-        "hunyuan-large-2025-02-10",
-        "glm-3-130b",
-        "glm-4-flash",
-        "glm-4-long",
-        "glm-4-airx",
-        "glm-4-air",
-        "glm-4-plus",
-        "glm-4-alltools",
-        "spark-desk-v1.5",
-
-        # Other language models
-        "discolm-german-7b-v1-awq",
-        "falcon-7b-instruct",
-        "neural-chat-7b-v3-1-awq",
-        "openhermes-2.5-mistral-7b",
-        "openhermes-2.5-mistral-7b-awq",
-        "sqlcoder-7b-2",
-        "starling-lm-7b-beta",
-        "tinyllama-1.1b-chat-v1.0",
-        "una-cybertron-7b-v2-bf16",
-        "zephyr-7b-beta",
-        "zephyr-7b-beta-awq",
-
-        # Inference-optimized models
-        "mistral-nemo-inferor-12b",
-        "rocinante-12b-v1",
-        "rocinante-12b-v1.1",
-        "unslopnemo-12b",
-
-        # Additional specialty models
-        "granite-3.1-2b-instruct",
-        "granite-3.1-8b-instruct",
-        "solar-0-70b-16bit",
-        "olympiccoder-7b",
-        "olympiccoder-32b",
-        "anubis-pro-105b-v1",
-        "fallen-llama-3.3-r1-70b-v1",
-        "skyfall-36b-v2",
-        "wayfarer-large-70b-llama-3.3",
-        "qwq-32b-snowdrop-v0",
-        "qwq-32b-abliterated",
-        "sky-t1-32b-preview",
-        "tiny-r1-32b-preview",
-        "lfm-3b",
-        "lfm-7b",
-        "lfm-40b",
-        "eva-llama-3.33-70b-v0.0",
-        "eva-llama-3.33-70b-v0.1",
-        "eva-qwen2.5-72b",
-        "eva-qwen2.5-32b-v0.2",
-        "sorcererlm-8x22b",
-        "mythalion-13b",
-        "toppy-m-7b",
-        "l3-lunaris-8b",
-        "l3.1-70b-hanami-x1",
-        "lumimaid-magnum-v4-12b",
-        "magnum-v4-72b",
-        "magnum-v4-12b",
-        "magnum-v3-34b",
-        "magnum-v2.5-12b-kto",
-        "magnum-v2-72b",
-        "magnum-v2-32b",
-        "magnum-v2-12b",
-        "magnum-72b",
-        "mini-magnum-12b-v1.1",
-        "remm-slerp-l2-13b",
-        "patricide-12b-unslop-mell",
-        "midnight-rose-70b",
-        "airoboros-l2-13b-gpt4-m2.0",
-        "airoboros-l2-70b",
-        "xwin-lm-70b",
-        "noromaid-20b",
-        "violet-twilight-v0.2",
-        "saiga-nemo-12b",
-        "l3-8b-stheno-v3.2",
-        "l3.3-electra-r1-70b",
-        "l3.3-cu-mai-r1-70b",
-        "l3.3-mokume-gane-r1-70b-v1.1",
-        "l3.3-70b-euryale-v2.3",
-        "l3.3-ms-evayale-70b",
-        "70b-l3.3-cirrus-x1",
-        "l31-70b-euryale-v2.2",
-        "l3-70b-euryale-v2.1",
-        "fimbulvetr-11b-v2",
-        "goliath-120b",
-        "hermes-2-pro-mistral-7b",
-        "mytho-max-l2-13b",
-        "deephermes-3-llama-3-8b-preview",
-        "nous-hermes-llama2-13b",
-        "hermes-3-llama-3.1-405b",
-        "nous-hermes-2-mixtral-8x7b-dpo",
-        "nova-lite-v1",
-        "nova-micro-v1",
-        "nova-pro-v1",
-        "inflection-3-pi",
-        "inflection-3-productivity",
-
-        # Image generation models
-        "weaver",
-        "sdxl",
-        "sdxl-turbo",
-        "sdxl-lightning",
-        "stable-diffusion-3",
-        "stable-diffusion-3-2b",
-        "stable-diffusion-3.5-large",
-        "stable-diffusion-3.5-turbo",
-        "playground-v3",
-        "playground-v2.5",
-        "animaginexl-3.1",
-        "realvisxl-4.0",
-        "imagen",
-        "imagen-3-fast",
-        "imagen-3",
-        "luma-photon",
-        "luma-photon-flash",
-        "recraft-20b",
-        "recraft-v3",
-        "grok-2-aurora",
-        "flux-schnell",
-        "flux-dev",
-        "flux-pro",
-        "flux-1.1-pro",
-        "flux-1.1-pro-ultra",
-        "flux-1.1-pro-ultra-raw",
-        "flux-realism",
-        "flux-half-illustration",
-        "ideogram-v2-turbo",
-        "ideogram-v2",
-        "amazon-titan",
-        "amazon-titan-v2",
-        "nova-canvas",
-        "omni-gen",
-        "aura-flow",
-        "cogview-3-flash",
-        "sana",
-        "kandinsky-3",
-        "dall-e-3",
-        "midjourney-v6.1",
-        "midjourney-v6",
-        "midjourney-v5.2",
-        "midjourney-v5.1",
-        "midjourney-v5",
-        "midjourney-v7",
-        "niji-v6",
-        "niji-v5",
-
-        # Video generation models
-        "t2v-turbo",
-        "cogvideox-5b",
-        "ltx-video",
-        "mochi-1",
-        "dream-machine",
-        "hailuo-ai",
-        "haiper-video-2.5",
-        "haiper-video-2",
-        "hunyuan-video",
-        "kling-video/v1/standard/text-to-video",
-        "kling-video/v1/pro/text-to-video",
-        "kling-video/v1.6/standard/text-to-video",
-        "kling-video/v1.5/pro/text-to-video",
-        "kokoro-82m",
-
-        # Audio models
-        "elevenlabs",
-        "myshell-tts",
-        "deepinfra-tts",
-        "whisper-large-v3",
-        "distil-large-v3",
-
-        # Embedding and moderation models
-        "text-embedding-3-large",
-        "text-embedding-3-small",
-        "omni-moderation-latest",
-        "omni-moderation-2024-09-26",
-        "text-moderation-latest",
-        "text-moderation-stable",
-        "text-moderation-007"
-    ]
-
-    @classmethod
-    def get_models(cls, api_key: str = None):
-        """Fetch available models from ElectronHub API.
-
-        Args:
-            api_key (str, optional): ElectronHub API key. If not provided, returns default models.
-
-        Returns:
-            list: List of available model IDs
-        """
-        if not api_key:
-            return cls.AVAILABLE_MODELS
-
-        try:
-            headers = {
-                'Content-Type': 'application/json',
-                'Accept': '*/*',
-                'User-Agent': LitAgent().random(),
-                'Authorization': f'Bearer {api_key}'
-            }
-
-            response = requests.get(
-                "https://api.electronhub.top/v1/models",
-                headers=headers,
-                timeout=10
-            )
-
-            if response.status_code != 200:
-                return cls.AVAILABLE_MODELS
-
-            data = response.json()
-            if "data" in data and isinstance(data["data"], list):
-                return [model["id"] for model in data["data"]]
-            return cls.AVAILABLE_MODELS
-
-        except Exception:
-            # Fallback to default models list if fetching fails
-            return cls.AVAILABLE_MODELS
-
-    @classmethod
-    def update_available_models(cls, api_key=None):
-        """Update the available models list from ElectronHub API"""
-        try:
-            models = cls.get_models(api_key)
-            if models and len(models) > 0:
-                cls.AVAILABLE_MODELS = models
-        except Exception:
-            # Fallback to default models list if fetching fails
-            pass
-
-    @staticmethod
-    def _electronhub_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-        """Extracts content from ElectronHub stream JSON objects."""
-        if isinstance(chunk, dict):
-            return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
-        return None
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 16000,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "claude-3-7-sonnet-20250219",
-        system_prompt: str = "You're helpful assistant that can help me with my questions.",
-        api_key: str = None
-    ):
-        """Initializes the ElectronHub API client."""
-        # Update available models from API
-        self.update_available_models(api_key)
-
-        # Validate model after updating available models
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.url = "https://api.electronhub.top/v1/chat/completions"
-        # Use LitAgent for user-agent
-        self.headers = {
-            'User-Agent': LitAgent().random(),
-            'Content-Type': 'application/json',
-            'Accept': '*/*',
-            'Accept-Language': 'en-US,en;q=0.9',
-            'DNT': '1',
-            'Origin': 'https://playground.electronhub.top',
-            'Referer': 'https://playground.electronhub.top/',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-site',
-            'Priority': 'u=1, i'
-        }
-
-        # Add API key if provided
-        if api_key:
-            self.headers['Authorization'] = f'Bearer {api_key}'
-        self.system_prompt = system_prompt
-        self.session = Session() # Use curl_cffi Session
-        self.session.headers.update(self.headers)
-        self.session.proxies = proxies # Assign proxies directly
-
-        self.is_conversation = is_conversation
-        self.max_tokens = max_tokens
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = True,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-        temperature: float = 0.5,
-        top_p: float = 1.0,
-        top_k: int = 5,
-    ) -> Union[Dict[str, Any], Generator]:
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        # Construct messages for the conversation
-        messages = [
-            {"role": "system", "content": self.system_prompt},
-            {"role": "user", "content": [{"type": "text", "text": conversation_prompt}]}
-        ]
-
-        # Payload construction based on ElectronHub API requirements
-        payload = {
-            "model": self.model,
-            "messages": messages,
-            "stream": stream,
-            "stream_options": {"include_usage": True},
-            "max_tokens": self.max_tokens,
-            "temperature": temperature,
-            "top_p": top_p,
-            "top_k": top_k,
-            "web_search": False,
-            "customId": None
-        }
-
-        def for_stream():
-            try:
-                response = self.session.post(
-                    self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout,
-                    impersonate="chrome120" # Add impersonate
-                )
-                response.raise_for_status()
-
-                streaming_text = ""
-                # Use sanitize_stream
-                processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None), # Pass byte iterator
-                    intro_value="data:",
-                    to_json=True, # Stream sends JSON
-                    skip_markers=["[DONE]"],
-                    content_extractor=self._electronhub_extractor, # Use the specific extractor
-                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
-                )
-
-                for content_chunk in processed_stream:
-                    # content_chunk is the string extracted by _electronhub_extractor
-                    if content_chunk and isinstance(content_chunk, str):
-                        streaming_text += content_chunk
-                        resp = dict(text=content_chunk)
-                        yield resp if not raw else content_chunk
-
-            except CurlError as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
-            except Exception as e:
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
-            finally:
-                # Update history after stream finishes or fails
-                if streaming_text:
-                    self.conversation.update_chat_history(prompt, streaming_text)
-
-        def for_non_stream():
-            collected_response = ""
-            try:
-                for chunk in for_stream():
-                    if isinstance(chunk, dict) and "text" in chunk:
-                        content = chunk["text"]
-                        if content is not None:
-                            collected_response += content
-            except Exception as e:
-                raise exceptions.FailedToGenerateResponseError(f"Error during non-stream processing: {str(e)}")
-
-            # Update history and last_response after aggregation
-            self.last_response = {"text": collected_response}
-            self.conversation.update_chat_history(prompt, collected_response)
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = True,
-        optimizer: str = None,
-        conversationally: bool = False,
-        temperature: float = 0.5,
-        top_p: float = 1.0,
-        top_k: int = 5,
-    ) -> str:
-        def for_stream():
-            for response in self.ask(
-                prompt,
-                True,
-                optimizer=optimizer,
-                conversationally=conversationally,
-                temperature=temperature,
-                top_p=top_p,
-                top_k=top_k
-            ):
-                yield self.get_message(response)
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                    temperature=temperature,
-                    top_p=top_p,
-                    top_k=top_k
-                )
-            )
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-
-if __name__ == "__main__":
-    from rich import print
-    # You need to provide your own API key
-    api_key = "" # U can get free API key from https://playground.electronhub.top/console
-    ai = ElectronHub(timeout=5000, api_key=api_key)
-    response = ai.chat("hi there, how are you today?", stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)