webscout 8.3.7__py3-none-any.whl → 2025.10.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -60
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +16 -1
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -316
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -314
- webscout/Provider/Andi.py +1 -1
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +237 -236
- webscout/Provider/ChatSandbox.py +343 -343
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -208
- webscout/Provider/Deepinfra.py +370 -366
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -308
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -164
- webscout/Provider/GeminiProxy.py +167 -167
- webscout/Provider/GithubChat.py +371 -372
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -383
- webscout/Provider/Jadve.py +282 -282
- webscout/Provider/K2Think.py +307 -307
- webscout/Provider/Koboldai.py +205 -205
- webscout/Provider/LambdaChat.py +423 -423
- webscout/Provider/Nemotron.py +244 -244
- webscout/Provider/Netwrck.py +248 -248
- webscout/Provider/OLLAMA.py +395 -395
- webscout/Provider/OPENAI/Cloudflare.py +393 -393
- webscout/Provider/OPENAI/FalconH1.py +451 -451
- webscout/Provider/OPENAI/FreeGemini.py +296 -296
- webscout/Provider/OPENAI/K2Think.py +431 -431
- webscout/Provider/OPENAI/NEMOTRON.py +240 -240
- webscout/Provider/OPENAI/PI.py +427 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +465 -465
- webscout/Provider/OPENAI/__init__.py +33 -18
- webscout/Provider/OPENAI/base.py +248 -248
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +592 -592
- webscout/Provider/OPENAI/chatgptclone.py +521 -521
- webscout/Provider/OPENAI/chatsandbox.py +202 -202
- webscout/Provider/OPENAI/deepinfra.py +318 -314
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -314
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -355
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -535
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -340
- webscout/Provider/OPENAI/toolbaz.py +419 -416
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +243 -243
- webscout/Provider/PI.py +405 -405
- webscout/Provider/Perplexitylabs.py +430 -430
- webscout/Provider/QwenLM.py +272 -272
- webscout/Provider/STT/__init__.py +16 -1
- webscout/Provider/Sambanova.py +257 -257
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -18
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -18
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -237
- webscout/Provider/TextPollinationsAI.py +310 -310
- webscout/Provider/TogetherAI.py +356 -356
- webscout/Provider/TwoAI.py +312 -312
- webscout/Provider/TypliAI.py +311 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/UNFINISHED/GizAI.py +294 -294
- webscout/Provider/UNFINISHED/Marcus.py +198 -198
- webscout/Provider/UNFINISHED/Qodo.py +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/UNFINISHED/XenAI.py +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +250 -250
- webscout/Provider/VercelAI.py +256 -256
- webscout/Provider/WiseCat.py +231 -231
- webscout/Provider/WrDoChat.py +366 -366
- webscout/Provider/__init__.py +33 -18
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -331
- webscout/Provider/cerebras.py +446 -446
- webscout/Provider/chatglm.py +394 -301
- webscout/Provider/cleeai.py +211 -211
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -263
- webscout/Provider/julius.py +223 -223
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +383 -383
- webscout/Provider/searchchat.py +292 -292
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -367
- webscout/Provider/turboseek.py +273 -273
- webscout/Provider/typefully.py +207 -207
- webscout/Provider/yep.py +372 -372
- webscout/__init__.py +27 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +663 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/search/__init__.py +51 -0
- webscout/search/base.py +195 -0
- webscout/search/duckduckgo_main.py +54 -0
- webscout/search/engines/__init__.py +48 -0
- webscout/search/engines/bing.py +84 -0
- webscout/search/engines/bing_news.py +52 -0
- webscout/search/engines/brave.py +43 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +78 -0
- webscout/search/engines/duckduckgo/base.py +187 -0
- webscout/search/engines/duckduckgo/images.py +97 -0
- webscout/search/engines/duckduckgo/maps.py +168 -0
- webscout/search/engines/duckduckgo/news.py +68 -0
- webscout/search/engines/duckduckgo/suggestions.py +21 -0
- webscout/search/engines/duckduckgo/text.py +211 -0
- webscout/search/engines/duckduckgo/translate.py +47 -0
- webscout/search/engines/duckduckgo/videos.py +63 -0
- webscout/search/engines/duckduckgo/weather.py +74 -0
- webscout/search/engines/mojeek.py +37 -0
- webscout/search/engines/wikipedia.py +56 -0
- webscout/search/engines/yahoo.py +65 -0
- webscout/search/engines/yahoo_news.py +64 -0
- webscout/search/engines/yandex.py +43 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +32 -0
- webscout/search/engines/yep/images.py +99 -0
- webscout/search/engines/yep/suggestions.py +35 -0
- webscout/search/engines/yep/text.py +114 -0
- webscout/search/http_client.py +156 -0
- webscout/search/results.py +137 -0
- webscout/search/yep_main.py +44 -0
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/version.py.bak +2 -0
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/METADATA +936 -937
- webscout-2025.10.13.dist-info/RECORD +329 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/OPENAI/Qwen3.py +0 -303
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/tempid.py +0 -134
- webscout/webscout_search.py +0 -1183
- webscout/webscout_search_async.py +0 -649
- webscout/yep_search.py +0 -346
- webscout-8.3.7.dist-info/RECORD +0 -301
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/WHEEL +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/top_level.txt +0 -0
|
@@ -1,314 +1,314 @@
|
|
|
1
|
-
import requests
|
|
2
|
-
import json
|
|
3
|
-
import time
|
|
4
|
-
import uuid
|
|
5
|
-
from typing import List, Dict, Optional, Union, Generator, Any
|
|
6
|
-
import re
|
|
7
|
-
import random
|
|
8
|
-
import string
|
|
9
|
-
from rich import print
|
|
10
|
-
from webscout.litagent.agent import LitAgent
|
|
11
|
-
import cloudscraper
|
|
12
|
-
# Import base classes and utility structures
|
|
13
|
-
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
14
|
-
from webscout.Provider.OPENAI.utils import (
|
|
15
|
-
ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
|
|
16
|
-
ChatCompletionMessage, CompletionUsage
|
|
17
|
-
)
|
|
18
|
-
|
|
19
|
-
# --- ChutesAI API Key Auto-Generator ---
|
|
20
|
-
def generate_chutesai_api_key():
|
|
21
|
-
url = "https://chutes.ai/auth/start?/create"
|
|
22
|
-
def generate_username(length=8):
|
|
23
|
-
return ''.join(random.choices(string.ascii_letters, k=length))
|
|
24
|
-
username = generate_username()
|
|
25
|
-
agent = LitAgent()
|
|
26
|
-
fingerprint = agent.generate_fingerprint("chrome")
|
|
27
|
-
headers = {
|
|
28
|
-
"Content-Type": "application/x-www-form-urlencoded",
|
|
29
|
-
"Accept": "application/json",
|
|
30
|
-
"User-Agent": fingerprint["user_agent"],
|
|
31
|
-
"Accept-Encoding": "gzip, deflate, br, zstd",
|
|
32
|
-
"Accept-Language": fingerprint["accept_language"],
|
|
33
|
-
"DNT": "1",
|
|
34
|
-
"Origin": "https://chutes.ai",
|
|
35
|
-
"Referer": "https://chutes.ai/auth/start",
|
|
36
|
-
"Sec-Ch-Ua": fingerprint["sec_ch_ua"],
|
|
37
|
-
"Sec-Ch-Ua-Mobile": "?0",
|
|
38
|
-
"Sec-Ch-Ua-Platform": fingerprint["platform"],
|
|
39
|
-
"X-Sveltekit-Action": "true"
|
|
40
|
-
}
|
|
41
|
-
data = {
|
|
42
|
-
"username": username,
|
|
43
|
-
"coldkey": "hotkey",
|
|
44
|
-
"__superform_id": "xpsmbd"
|
|
45
|
-
}
|
|
46
|
-
scraper = cloudscraper.create_scraper()
|
|
47
|
-
response = scraper.post(url, headers=headers, data=data)
|
|
48
|
-
print(f"[bold green]Status:[/] {response.status_code}")
|
|
49
|
-
|
|
50
|
-
# Ensure response is decoded as UTF-8
|
|
51
|
-
response.encoding = 'utf-8'
|
|
52
|
-
|
|
53
|
-
try:
|
|
54
|
-
resp_json = response.json()
|
|
55
|
-
except Exception:
|
|
56
|
-
try:
|
|
57
|
-
# Try to decode the response text with UTF-8 explicitly
|
|
58
|
-
decoded_text = response.content.decode('utf-8', errors='replace')
|
|
59
|
-
print(decoded_text)
|
|
60
|
-
except Exception:
|
|
61
|
-
print("Failed to decode response content")
|
|
62
|
-
return None
|
|
63
|
-
print(resp_json)
|
|
64
|
-
# Extract the api_key using regex from the 'data' field
|
|
65
|
-
if 'data' in resp_json:
|
|
66
|
-
api_key_match = re.search(r'(cpk_[a-zA-Z0-9.]+)', resp_json['data'])
|
|
67
|
-
if api_key_match:
|
|
68
|
-
api_key = api_key_match.group(1)
|
|
69
|
-
print(f"[bold yellow]Auto-generated ChutesAI API Key:[/] {api_key}")
|
|
70
|
-
return api_key
|
|
71
|
-
else:
|
|
72
|
-
print("[red]API key not found in response data.")
|
|
73
|
-
return None
|
|
74
|
-
|
|
75
|
-
# --- ChutesAI Client ---
|
|
76
|
-
|
|
77
|
-
class Completions(BaseCompletions):
|
|
78
|
-
def __init__(self, client: 'ChutesAI'):
|
|
79
|
-
self._client = client
|
|
80
|
-
|
|
81
|
-
def create(
|
|
82
|
-
self,
|
|
83
|
-
*,
|
|
84
|
-
model: str,
|
|
85
|
-
messages: List[Dict[str, str]],
|
|
86
|
-
max_tokens: Optional[int] = 1024,
|
|
87
|
-
stream: bool = False,
|
|
88
|
-
temperature: Optional[float] = None,
|
|
89
|
-
top_p: Optional[float] = None,
|
|
90
|
-
**kwargs: Any
|
|
91
|
-
) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
|
|
92
|
-
"""
|
|
93
|
-
Creates a model response for the given chat conversation.
|
|
94
|
-
Mimics openai.chat.completions.create
|
|
95
|
-
"""
|
|
96
|
-
payload = {
|
|
97
|
-
"model": model,
|
|
98
|
-
"messages": messages,
|
|
99
|
-
"max_tokens": max_tokens,
|
|
100
|
-
"stream": stream,
|
|
101
|
-
}
|
|
102
|
-
if temperature is not None:
|
|
103
|
-
payload["temperature"] = temperature
|
|
104
|
-
if top_p is not None:
|
|
105
|
-
payload["top_p"] = top_p
|
|
106
|
-
payload.update(kwargs)
|
|
107
|
-
|
|
108
|
-
request_id = f"chatcmpl-{uuid.uuid4()}"
|
|
109
|
-
created_time = int(time.time())
|
|
110
|
-
|
|
111
|
-
if stream:
|
|
112
|
-
return self._create_stream(request_id, created_time, model, payload)
|
|
113
|
-
else:
|
|
114
|
-
return self._create_non_stream(request_id, created_time, model, payload)
|
|
115
|
-
|
|
116
|
-
def _create_stream(
|
|
117
|
-
self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
|
|
118
|
-
) -> Generator[ChatCompletionChunk, None, None]:
|
|
119
|
-
try:
|
|
120
|
-
response = self._client.scraper.post(
|
|
121
|
-
self._client.base_url,
|
|
122
|
-
headers=self._client.headers,
|
|
123
|
-
json=payload,
|
|
124
|
-
stream=True,
|
|
125
|
-
timeout=self._client.timeout
|
|
126
|
-
)
|
|
127
|
-
response.raise_for_status()
|
|
128
|
-
|
|
129
|
-
prompt_tokens = 0
|
|
130
|
-
completion_tokens = 0
|
|
131
|
-
total_tokens = 0
|
|
132
|
-
|
|
133
|
-
for line in response.iter_lines():
|
|
134
|
-
if line:
|
|
135
|
-
decoded_line = line.decode('utf-8', errors='replace').strip()
|
|
136
|
-
if decoded_line.startswith("data: "):
|
|
137
|
-
json_str = decoded_line[6:]
|
|
138
|
-
if json_str == "[DONE]":
|
|
139
|
-
break
|
|
140
|
-
try:
|
|
141
|
-
data = json.loads(json_str)
|
|
142
|
-
choice_data = data.get('choices', [{}])[0]
|
|
143
|
-
delta_data = choice_data.get('delta', {})
|
|
144
|
-
finish_reason = choice_data.get('finish_reason')
|
|
145
|
-
|
|
146
|
-
usage_data = data.get('usage', {})
|
|
147
|
-
if usage_data:
|
|
148
|
-
prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
|
|
149
|
-
completion_tokens = usage_data.get('completion_tokens', completion_tokens)
|
|
150
|
-
total_tokens = usage_data.get('total_tokens', total_tokens)
|
|
151
|
-
|
|
152
|
-
delta = ChoiceDelta(
|
|
153
|
-
content=delta_data.get('content'),
|
|
154
|
-
role=delta_data.get('role'),
|
|
155
|
-
tool_calls=delta_data.get('tool_calls')
|
|
156
|
-
)
|
|
157
|
-
choice = Choice(
|
|
158
|
-
index=choice_data.get('index', 0),
|
|
159
|
-
delta=delta,
|
|
160
|
-
finish_reason=finish_reason,
|
|
161
|
-
logprobs=choice_data.get('logprobs')
|
|
162
|
-
)
|
|
163
|
-
chunk = ChatCompletionChunk(
|
|
164
|
-
id=request_id,
|
|
165
|
-
choices=[choice],
|
|
166
|
-
created=created_time,
|
|
167
|
-
model=model,
|
|
168
|
-
system_fingerprint=data.get('system_fingerprint')
|
|
169
|
-
)
|
|
170
|
-
if hasattr(chunk, "model_dump"):
|
|
171
|
-
chunk_dict = chunk.model_dump(exclude_none=True)
|
|
172
|
-
else:
|
|
173
|
-
chunk_dict = chunk.dict(exclude_none=True)
|
|
174
|
-
usage_dict = {
|
|
175
|
-
"prompt_tokens": prompt_tokens or 10,
|
|
176
|
-
"completion_tokens": completion_tokens or (len(delta_data.get('content', '')) if delta_data.get('content') else 0),
|
|
177
|
-
"total_tokens": total_tokens or (10 + (len(delta_data.get('content', '')) if delta_data.get('content') else 0)),
|
|
178
|
-
"estimated_cost": None
|
|
179
|
-
}
|
|
180
|
-
if delta_data.get('content'):
|
|
181
|
-
completion_tokens += 1
|
|
182
|
-
total_tokens = prompt_tokens + completion_tokens
|
|
183
|
-
usage_dict["completion_tokens"] = completion_tokens
|
|
184
|
-
usage_dict["total_tokens"] = total_tokens
|
|
185
|
-
chunk_dict["usage"] = usage_dict
|
|
186
|
-
yield chunk
|
|
187
|
-
except json.JSONDecodeError:
|
|
188
|
-
print(f"Warning: Could not decode JSON line: {json_str}")
|
|
189
|
-
continue
|
|
190
|
-
except requests.exceptions.RequestException as e:
|
|
191
|
-
print(f"Error during ChutesAI stream request: {e}")
|
|
192
|
-
raise IOError(f"ChutesAI request failed: {e}") from e
|
|
193
|
-
except Exception as e:
|
|
194
|
-
print(f"Error processing ChutesAI stream: {e}")
|
|
195
|
-
raise
|
|
196
|
-
|
|
197
|
-
def _create_non_stream(
|
|
198
|
-
self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
|
|
199
|
-
) -> ChatCompletion:
|
|
200
|
-
try:
|
|
201
|
-
response = self._client.scraper.post(
|
|
202
|
-
self._client.base_url,
|
|
203
|
-
headers=self._client.headers,
|
|
204
|
-
json=payload,
|
|
205
|
-
timeout=self._client.timeout
|
|
206
|
-
)
|
|
207
|
-
response.raise_for_status()
|
|
208
|
-
data = response.json()
|
|
209
|
-
choices_data = data.get('choices', [])
|
|
210
|
-
usage_data = data.get('usage', {})
|
|
211
|
-
choices = []
|
|
212
|
-
for choice_d in choices_data:
|
|
213
|
-
message_d = choice_d.get('message', {})
|
|
214
|
-
message = ChatCompletionMessage(
|
|
215
|
-
role=message_d.get('role', 'assistant'),
|
|
216
|
-
content=message_d.get('content', '')
|
|
217
|
-
)
|
|
218
|
-
choice = Choice(
|
|
219
|
-
index=choice_d.get('index', 0),
|
|
220
|
-
message=message,
|
|
221
|
-
finish_reason=choice_d.get('finish_reason', 'stop')
|
|
222
|
-
)
|
|
223
|
-
choices.append(choice)
|
|
224
|
-
usage = CompletionUsage(
|
|
225
|
-
prompt_tokens=usage_data.get('prompt_tokens', 0),
|
|
226
|
-
completion_tokens=usage_data.get('completion_tokens', 0),
|
|
227
|
-
total_tokens=usage_data.get('total_tokens', 0)
|
|
228
|
-
)
|
|
229
|
-
completion = ChatCompletion(
|
|
230
|
-
id=request_id,
|
|
231
|
-
choices=choices,
|
|
232
|
-
created=created_time,
|
|
233
|
-
model=data.get('model', model),
|
|
234
|
-
usage=usage,
|
|
235
|
-
)
|
|
236
|
-
return completion
|
|
237
|
-
except requests.exceptions.RequestException as e:
|
|
238
|
-
print(f"Error during ChutesAI non-stream request: {e}")
|
|
239
|
-
raise IOError(f"ChutesAI request failed: {e}") from e
|
|
240
|
-
except Exception as e:
|
|
241
|
-
print(f"Error processing ChutesAI response: {e}")
|
|
242
|
-
raise
|
|
243
|
-
|
|
244
|
-
class Chat(BaseChat):
|
|
245
|
-
def __init__(self, client: 'ChutesAI'):
|
|
246
|
-
self.completions = Completions(client)
|
|
247
|
-
|
|
248
|
-
class ChutesAI(OpenAICompatibleProvider):
|
|
249
|
-
AVAILABLE_MODELS = [
|
|
250
|
-
"deepseek-ai/DeepSeek-V3-0324",
|
|
251
|
-
"deepseek-ai/DeepSeek-R1",
|
|
252
|
-
"NousResearch/DeepHermes-3-Mistral-24B-Preview",
|
|
253
|
-
"chutesai/Llama-4-Maverick-17B-128E-Instruct-FP8",
|
|
254
|
-
]
|
|
255
|
-
def __init__(self, api_key: str = None,):
|
|
256
|
-
self.timeout = None # Infinite timeout
|
|
257
|
-
self.base_url = "https://llm.chutes.ai/v1/chat/completions"
|
|
258
|
-
|
|
259
|
-
# Always generate a new API key, ignore any provided key
|
|
260
|
-
print("[yellow]Generating new ChutesAI API key...[/]")
|
|
261
|
-
self.api_key = generate_chutesai_api_key()
|
|
262
|
-
|
|
263
|
-
if not self.api_key:
|
|
264
|
-
print("[red]Failed to generate API key. Retrying...[/]")
|
|
265
|
-
# Retry once more
|
|
266
|
-
self.api_key = generate_chutesai_api_key()
|
|
267
|
-
|
|
268
|
-
if not self.api_key:
|
|
269
|
-
raise ValueError("Failed to generate ChutesAI API key after multiple attempts.")
|
|
270
|
-
|
|
271
|
-
print(f"[green]Successfully generated API key: {self.api_key[:20]}...[/]")
|
|
272
|
-
|
|
273
|
-
self.scraper = cloudscraper.create_scraper()
|
|
274
|
-
self.headers = {
|
|
275
|
-
"Authorization": f"Bearer {self.api_key}",
|
|
276
|
-
"Content-Type": "application/json"
|
|
277
|
-
}
|
|
278
|
-
self.scraper.headers.update(self.headers)
|
|
279
|
-
self.chat = Chat(self)
|
|
280
|
-
|
|
281
|
-
@property
|
|
282
|
-
def models(self):
|
|
283
|
-
class _ModelList:
|
|
284
|
-
def list(inner_self):
|
|
285
|
-
return type(self).AVAILABLE_MODELS
|
|
286
|
-
return _ModelList()
|
|
287
|
-
|
|
288
|
-
if __name__ == "__main__":
|
|
289
|
-
try:
|
|
290
|
-
# Example usage - always use generated API key
|
|
291
|
-
client = ChutesAI()
|
|
292
|
-
|
|
293
|
-
messages = [
|
|
294
|
-
{"role": "system", "content": "You are a helpful assistant."},
|
|
295
|
-
{"role": "user", "content": "What is the capital of France?"}
|
|
296
|
-
]
|
|
297
|
-
|
|
298
|
-
print("[cyan]Making API request...[/]")
|
|
299
|
-
response = client.chat.completions.create(
|
|
300
|
-
model="deepseek-ai/DeepSeek-V3-0324",
|
|
301
|
-
messages=messages,
|
|
302
|
-
max_tokens=50,
|
|
303
|
-
stream=True
|
|
304
|
-
)
|
|
305
|
-
for chunk in response:
|
|
306
|
-
if hasattr(chunk, "model_dump"):
|
|
307
|
-
chunk_dict = chunk.model_dump(exclude_none=True)
|
|
308
|
-
else:
|
|
309
|
-
chunk_dict = chunk.dict(exclude_none=True)
|
|
310
|
-
print(f"[green]Response Chunk:[/] {chunk_dict}")
|
|
311
|
-
|
|
312
|
-
except Exception as e:
|
|
313
|
-
print(f"[red]Error: {e}[/]")
|
|
1
|
+
import requests
|
|
2
|
+
import json
|
|
3
|
+
import time
|
|
4
|
+
import uuid
|
|
5
|
+
from typing import List, Dict, Optional, Union, Generator, Any
|
|
6
|
+
import re
|
|
7
|
+
import random
|
|
8
|
+
import string
|
|
9
|
+
from rich import print
|
|
10
|
+
from webscout.litagent.agent import LitAgent
|
|
11
|
+
import cloudscraper
|
|
12
|
+
# Import base classes and utility structures
|
|
13
|
+
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
14
|
+
from webscout.Provider.OPENAI.utils import (
|
|
15
|
+
ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
|
|
16
|
+
ChatCompletionMessage, CompletionUsage
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
# --- ChutesAI API Key Auto-Generator ---
|
|
20
|
+
def generate_chutesai_api_key():
|
|
21
|
+
url = "https://chutes.ai/auth/start?/create"
|
|
22
|
+
def generate_username(length=8):
|
|
23
|
+
return ''.join(random.choices(string.ascii_letters, k=length))
|
|
24
|
+
username = generate_username()
|
|
25
|
+
agent = LitAgent()
|
|
26
|
+
fingerprint = agent.generate_fingerprint("chrome")
|
|
27
|
+
headers = {
|
|
28
|
+
"Content-Type": "application/x-www-form-urlencoded",
|
|
29
|
+
"Accept": "application/json",
|
|
30
|
+
"User-Agent": fingerprint["user_agent"],
|
|
31
|
+
"Accept-Encoding": "gzip, deflate, br, zstd",
|
|
32
|
+
"Accept-Language": fingerprint["accept_language"],
|
|
33
|
+
"DNT": "1",
|
|
34
|
+
"Origin": "https://chutes.ai",
|
|
35
|
+
"Referer": "https://chutes.ai/auth/start",
|
|
36
|
+
"Sec-Ch-Ua": fingerprint["sec_ch_ua"],
|
|
37
|
+
"Sec-Ch-Ua-Mobile": "?0",
|
|
38
|
+
"Sec-Ch-Ua-Platform": fingerprint["platform"],
|
|
39
|
+
"X-Sveltekit-Action": "true"
|
|
40
|
+
}
|
|
41
|
+
data = {
|
|
42
|
+
"username": username,
|
|
43
|
+
"coldkey": "hotkey",
|
|
44
|
+
"__superform_id": "xpsmbd"
|
|
45
|
+
}
|
|
46
|
+
scraper = cloudscraper.create_scraper()
|
|
47
|
+
response = scraper.post(url, headers=headers, data=data)
|
|
48
|
+
print(f"[bold green]Status:[/] {response.status_code}")
|
|
49
|
+
|
|
50
|
+
# Ensure response is decoded as UTF-8
|
|
51
|
+
response.encoding = 'utf-8'
|
|
52
|
+
|
|
53
|
+
try:
|
|
54
|
+
resp_json = response.json()
|
|
55
|
+
except Exception:
|
|
56
|
+
try:
|
|
57
|
+
# Try to decode the response text with UTF-8 explicitly
|
|
58
|
+
decoded_text = response.content.decode('utf-8', errors='replace')
|
|
59
|
+
print(decoded_text)
|
|
60
|
+
except Exception:
|
|
61
|
+
print("Failed to decode response content")
|
|
62
|
+
return None
|
|
63
|
+
print(resp_json)
|
|
64
|
+
# Extract the api_key using regex from the 'data' field
|
|
65
|
+
if 'data' in resp_json:
|
|
66
|
+
api_key_match = re.search(r'(cpk_[a-zA-Z0-9.]+)', resp_json['data'])
|
|
67
|
+
if api_key_match:
|
|
68
|
+
api_key = api_key_match.group(1)
|
|
69
|
+
print(f"[bold yellow]Auto-generated ChutesAI API Key:[/] {api_key}")
|
|
70
|
+
return api_key
|
|
71
|
+
else:
|
|
72
|
+
print("[red]API key not found in response data.")
|
|
73
|
+
return None
|
|
74
|
+
|
|
75
|
+
# --- ChutesAI Client ---
|
|
76
|
+
|
|
77
|
+
class Completions(BaseCompletions):
    """Chat-completions interface for the ChutesAI provider.

    Mimics ``openai.chat.completions.create``: returns a ``ChatCompletion``
    for non-streaming calls, or a generator of ``ChatCompletionChunk``
    objects when ``stream=True``.
    """

    def __init__(self, client: 'ChutesAI'):
        # Back-reference to the parent client for its scraper, headers,
        # base_url and timeout.
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = 1024,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create

        Args:
            model: Model identifier (see ``ChutesAI.AVAILABLE_MODELS``).
            messages: Conversation as ``{"role": ..., "content": ...}`` dicts.
            max_tokens: Upper bound on generated tokens (default 1024).
            stream: If True, return a generator of ``ChatCompletionChunk``.
            temperature: Sampling temperature; omitted from payload if None.
            top_p: Nucleus-sampling value; omitted from payload if None.
            **kwargs: Extra fields merged verbatim into the request payload.

        Returns:
            ``ChatCompletion``, or a generator of ``ChatCompletionChunk``
            when ``stream`` is True.
        """
        payload = {
            "model": model,
            "messages": messages,
            "max_tokens": max_tokens,
            "stream": stream,
        }
        # Only include optional sampling knobs when explicitly provided.
        if temperature is not None:
            payload["temperature"] = temperature
        if top_p is not None:
            payload["top_p"] = top_p
        payload.update(kwargs)

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload)
        else:
            return self._create_non_stream(request_id, created_time, model, payload)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Yield ``ChatCompletionChunk`` objects parsed from the SSE stream.

        Raises:
            IOError: If the underlying HTTP request fails.
        """
        # NOTE(review): the previous version also assembled a per-chunk
        # usage dict (with double-counted completion tokens) that was never
        # yielded or returned — that dead code has been removed; the stream
        # of yielded chunks is unchanged.
        try:
            response = self._client.scraper.post(
                self._client.base_url,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=self._client.timeout
            )
            response.raise_for_status()

            for line in response.iter_lines():
                if not line:
                    continue
                decoded_line = line.decode('utf-8', errors='replace').strip()
                # Server-sent-events framing: only "data: ..." lines carry JSON.
                if not decoded_line.startswith("data: "):
                    continue
                json_str = decoded_line[6:]
                if json_str == "[DONE]":
                    break
                try:
                    data = json.loads(json_str)
                except json.JSONDecodeError:
                    print(f"Warning: Could not decode JSON line: {json_str}")
                    continue

                choice_data = data.get('choices', [{}])[0]
                delta_data = choice_data.get('delta', {})

                delta = ChoiceDelta(
                    content=delta_data.get('content'),
                    role=delta_data.get('role'),
                    tool_calls=delta_data.get('tool_calls')
                )
                choice = Choice(
                    index=choice_data.get('index', 0),
                    delta=delta,
                    finish_reason=choice_data.get('finish_reason'),
                    logprobs=choice_data.get('logprobs')
                )
                yield ChatCompletionChunk(
                    id=request_id,
                    choices=[choice],
                    created=created_time,
                    model=model,
                    system_fingerprint=data.get('system_fingerprint')
                )
        except requests.exceptions.RequestException as e:
            print(f"Error during ChutesAI stream request: {e}")
            raise IOError(f"ChutesAI request failed: {e}") from e
        except Exception as e:
            print(f"Error processing ChutesAI stream: {e}")
            raise

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
    ) -> ChatCompletion:
        """Perform a blocking request and build a ``ChatCompletion``.

        Raises:
            IOError: If the underlying HTTP request fails.
        """
        try:
            response = self._client.scraper.post(
                self._client.base_url,
                headers=self._client.headers,
                json=payload,
                timeout=self._client.timeout
            )
            response.raise_for_status()
            data = response.json()

            choices = []
            for choice_d in data.get('choices', []):
                message_d = choice_d.get('message', {})
                message = ChatCompletionMessage(
                    role=message_d.get('role', 'assistant'),
                    content=message_d.get('content', '')
                )
                choices.append(Choice(
                    index=choice_d.get('index', 0),
                    message=message,
                    finish_reason=choice_d.get('finish_reason', 'stop')
                ))

            usage_data = data.get('usage', {})
            usage = CompletionUsage(
                prompt_tokens=usage_data.get('prompt_tokens', 0),
                completion_tokens=usage_data.get('completion_tokens', 0),
                total_tokens=usage_data.get('total_tokens', 0)
            )
            return ChatCompletion(
                id=request_id,
                choices=choices,
                created=created_time,
                # Prefer the model name echoed by the server, if any.
                model=data.get('model', model),
                usage=usage,
            )
        except requests.exceptions.RequestException as e:
            print(f"Error during ChutesAI non-stream request: {e}")
            raise IOError(f"ChutesAI request failed: {e}") from e
        except Exception as e:
            print(f"Error processing ChutesAI response: {e}")
            raise
|
244
|
+
class Chat(BaseChat):
    """Namespace object exposing ``client.chat.completions``, mirroring
    the OpenAI SDK's ``client.chat`` attribute layout."""

    def __init__(self, client: 'ChutesAI'):
        # Completions keeps a back-reference to the parent client for
        # its HTTP session, headers and timeout.
        self.completions = Completions(client)
|
248
|
+
class ChutesAI(OpenAICompatibleProvider):
    """OpenAI-compatible client for the ChutesAI chat-completions endpoint.

    A fresh API key is always generated at construction time; the
    ``api_key`` argument is accepted for interface compatibility only
    and deliberately ignored.
    """

    AVAILABLE_MODELS = [
        "deepseek-ai/DeepSeek-V3-0324",
        "deepseek-ai/DeepSeek-R1",
        "NousResearch/DeepHermes-3-Mistral-24B-Preview",
        "chutesai/Llama-4-Maverick-17B-128E-Instruct-FP8",
    ]

    def __init__(self, api_key: str = None,):
        """Build the client: mint an API key (one retry) and prepare the session.

        Raises:
            ValueError: If key generation fails on both attempts.
        """
        self.timeout = None  # Infinite timeout
        self.base_url = "https://llm.chutes.ai/v1/chat/completions"

        # Always generate a new API key, ignore any provided key.
        print("[yellow]Generating new ChutesAI API key...[/]")
        key = generate_chutesai_api_key()
        if not key:
            print("[red]Failed to generate API key. Retrying...[/]")
            # Single retry before giving up.
            key = generate_chutesai_api_key()
        self.api_key = key

        if not self.api_key:
            raise ValueError("Failed to generate ChutesAI API key after multiple attempts.")

        print(f"[green]Successfully generated API key: {self.api_key[:20]}...[/]")

        session = cloudscraper.create_scraper()
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }
        session.headers.update(self.headers)
        self.scraper = session
        self.chat = Chat(self)

    @property
    def models(self):
        """Return an object exposing ``.list()``, mirroring the OpenAI SDK."""
        owner_cls = type(self)

        class _ModelList:
            def list(inner_self):
                return owner_cls.AVAILABLE_MODELS

        return _ModelList()
|
|
287
|
+
|
|
288
|
+
if __name__ == "__main__":
    # Smoke test: mint a key, stream a short completion, dump each chunk.
    try:
        # Example usage - always use generated API key
        client = ChutesAI()

        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "What is the capital of France?"}
        ]

        print("[cyan]Making API request...[/]")
        stream = client.chat.completions.create(
            model="deepseek-ai/DeepSeek-V3-0324",
            messages=messages,
            max_tokens=50,
            stream=True
        )
        for chunk in stream:
            # Pydantic v2 exposes model_dump; fall back to v1's dict().
            serialize = chunk.model_dump if hasattr(chunk, "model_dump") else chunk.dict
            print(f"[green]Response Chunk:[/] {serialize(exclude_none=True)}")

    except Exception as e:
        print(f"[red]Error: {e}[/]")
        print("[yellow]If the issue persists, the ChutesAI service might be down or the API key generation method needs updating.[/]")