webscout 8.3.7-py3-none-any.whl → 2025.10.11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -60
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +16 -1
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -316
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -314
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -343
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -208
- webscout/Provider/Deepinfra.py +370 -366
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -308
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -164
- webscout/Provider/GeminiProxy.py +167 -167
- webscout/Provider/GithubChat.py +371 -372
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -383
- webscout/Provider/Jadve.py +282 -282
- webscout/Provider/K2Think.py +307 -307
- webscout/Provider/Koboldai.py +205 -205
- webscout/Provider/LambdaChat.py +423 -423
- webscout/Provider/Nemotron.py +244 -244
- webscout/Provider/Netwrck.py +248 -248
- webscout/Provider/OLLAMA.py +395 -395
- webscout/Provider/OPENAI/Cloudflare.py +393 -393
- webscout/Provider/OPENAI/FalconH1.py +451 -451
- webscout/Provider/OPENAI/FreeGemini.py +296 -296
- webscout/Provider/OPENAI/K2Think.py +431 -431
- webscout/Provider/OPENAI/NEMOTRON.py +240 -240
- webscout/Provider/OPENAI/PI.py +427 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +465 -465
- webscout/Provider/OPENAI/__init__.py +33 -18
- webscout/Provider/OPENAI/base.py +248 -248
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +592 -592
- webscout/Provider/OPENAI/chatgptclone.py +521 -521
- webscout/Provider/OPENAI/chatsandbox.py +202 -202
- webscout/Provider/OPENAI/deepinfra.py +318 -314
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -314
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -355
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -535
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -340
- webscout/Provider/OPENAI/toolbaz.py +419 -416
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +243 -243
- webscout/Provider/PI.py +405 -405
- webscout/Provider/Perplexitylabs.py +430 -430
- webscout/Provider/QwenLM.py +272 -272
- webscout/Provider/STT/__init__.py +16 -1
- webscout/Provider/Sambanova.py +257 -257
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -18
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -18
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -237
- webscout/Provider/TextPollinationsAI.py +310 -310
- webscout/Provider/TogetherAI.py +356 -356
- webscout/Provider/TwoAI.py +312 -312
- webscout/Provider/TypliAI.py +311 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/UNFINISHED/GizAI.py +294 -294
- webscout/Provider/UNFINISHED/Marcus.py +198 -198
- webscout/Provider/UNFINISHED/Qodo.py +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/UNFINISHED/XenAI.py +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +250 -250
- webscout/Provider/VercelAI.py +256 -256
- webscout/Provider/WiseCat.py +231 -231
- webscout/Provider/WrDoChat.py +366 -366
- webscout/Provider/__init__.py +33 -18
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -331
- webscout/Provider/cerebras.py +446 -446
- webscout/Provider/chatglm.py +394 -301
- webscout/Provider/cleeai.py +211 -211
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -263
- webscout/Provider/julius.py +223 -223
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +383 -383
- webscout/Provider/searchchat.py +292 -292
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -367
- webscout/Provider/turboseek.py +273 -273
- webscout/Provider/typefully.py +207 -207
- webscout/Provider/yep.py +372 -372
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -937
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/OPENAI/Qwen3.py +0 -303
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/tempid.py +0 -134
- webscout-8.3.7.dist-info/RECORD +0 -301
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
@@ -1,241 +1,241 @@

The single hunk spans the whole NEMOTRON OpenAI-compatible provider module (webscout/Provider/OPENAI/NEMOTRON.py, +240 −240 in the list above): lines 1–240 appear as removed and re-added with identical text, and only the final line, `return _ModelList()`, is shared context. The file content is shown once:

```python
import time
import uuid
import requests
import json
import random
import datetime
import re
from typing import List, Dict, Optional, Union, Generator, Any
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
from webscout.Provider.OPENAI.utils import (
    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
    ChatCompletionMessage, CompletionUsage, format_prompt, count_tokens
)
try:
    from webscout.litagent import LitAgent
except ImportError:
    class LitAgent:
        def random(self) -> str:
            return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
from webscout.AIutel import sanitize_stream
from webscout import exceptions


class Completions(BaseCompletions):
    def __init__(self, client: 'NEMOTRON'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[dict] = None,
        **kwargs: Any
    ) -> ChatCompletion:
        nemotron_model_name = self._client.convert_model_name(model)
        prompt_content = format_prompt(messages, add_special_tokens=True, include_system=True, do_continue=True)
        payload = {
            "content": prompt_content,
            "imageSrc": "",
            "model": nemotron_model_name,
            "user": self._client._get_user_data(),
            "conversationId": kwargs.get("conversation_id", "")
        }
        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())
        # Always use non-stream mode, ignore 'stream' argument
        return self._create_non_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)

    def _create_stream(
        self, request_id: str, created_time: int, model_name: str, payload: Dict[str, Any],
        timeout: Optional[int] = None, proxies: Optional[dict] = None
    ) -> Generator[ChatCompletionChunk, None, None]:
        try:
            response_generator = self._client._internal_make_request(payload, stream=True, request_timeout=timeout, request_proxies=proxies)
            for text_chunk in response_generator:
                if text_chunk:
                    delta = ChoiceDelta(content=text_chunk, role="assistant")
                    choice = Choice(index=0, delta=delta, finish_reason=None)
                    chunk = ChatCompletionChunk(
                        id=request_id,
                        choices=[choice],
                        created=created_time,
                        model=model_name,
                    )
                    yield chunk
            final_delta = ChoiceDelta()
            final_choice = Choice(index=0, delta=final_delta, finish_reason="stop")
            final_chunk = ChatCompletionChunk(
                id=request_id,
                choices=[final_choice],
                created=created_time,
                model=model_name,
            )
            yield final_chunk
        except Exception as e:
            raise IOError(f"NEMOTRON request failed: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model_name: str, payload: Dict[str, Any],
        timeout: Optional[int] = None, proxies: Optional[dict] = None
    ) -> ChatCompletion:
        full_response_content = ""
        try:
            response_generator = self._client._internal_make_request(payload, stream=False, request_timeout=timeout, request_proxies=proxies)
            full_response_content = next(response_generator, "")
        except Exception as e:
            pass
        message = ChatCompletionMessage(role="assistant", content=full_response_content)
        choice = Choice(index=0, message=message, finish_reason="stop")
        prompt_tokens = count_tokens(payload.get("content", ""))
        completion_tokens = count_tokens(full_response_content)
        usage = CompletionUsage(
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            total_tokens=prompt_tokens + completion_tokens
        )
        completion = ChatCompletion(
            id=request_id,
            choices=[choice],
            created=created_time,
            model=model_name,
            usage=usage,
        )
        return completion


class Chat(BaseChat):
    def __init__(self, client: 'NEMOTRON'):
        self.completions = Completions(client)


class NEMOTRON(OpenAICompatibleProvider):
    AVAILABLE_MODELS = [
        "gpt4o",
        "nemotron70b",
    ]

    API_BASE_URL = "https://nemotron.one/api/chat"

    def __init__(self, proxies: Optional[dict] = None):
        super().__init__(proxies=proxies)
        self.timeout = 30
        agent = LitAgent()
        user_agent = agent.random()
        self.base_headers = {
            "authority": "nemotron.one",
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "origin": "https://nemotron.one",
            "sec-ch-ua": '"Chromium";v="136", "Not.A/Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "user-agent": user_agent
        }
        self.session.headers.update(self.base_headers)
        self.chat = Chat(self)

    def _generate_random_email(self) -> str:
        random_letter = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        random_string = ''.join(random.choice(random_letter) for _ in range(10))
        return f"{random_string}@gmail.com"

    def _generate_random_id(self) -> str:
        timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
        random_letter = "abcdefghijklmnopqrstuvwxyz0123456789"
        random_string = ''.join(random.choice(random_letter) for _ in range(8))
        return f"cm{random_string}{timestamp[:10]}"

    def _get_user_data(self) -> Dict[str, Any]:
        current_time = datetime.datetime.now().isoformat()
        return {
            "name": "user",
            "email": self._generate_random_email(),
            "image": "https://lh3.googleusercontent.com/a/default-user=s96-c",
            "id": self._generate_random_id(),
            "password": None,
            "emailVerified": None,
            "credits": 100000000000,
            "isPro": False,
            "createdAt": current_time,
            "updatedAt": current_time
        }

    def convert_model_name(self, model_alias: str) -> str:
        """
        Convert model names to ones supported by NEMOTRON API.

        Args:
            model_alias: Model name to convert

        Returns:
            NEMOTRON model name for API payload
        """
        # Accept only direct model names
        if model_alias in self.AVAILABLE_MODELS:
            return model_alias

        # Case-insensitive matching
        for m in self.AVAILABLE_MODELS:
            if m.lower() == model_alias.lower():
                return m

        # Default to gpt4o if no match
        print(f"Warning: Unknown model '{model_alias}'. Using 'gpt4o' instead.")
        return "gpt4o"

    def _internal_make_request(
        self,
        payload: Dict[str, Any],
        stream: bool = False,
        request_timeout: Optional[int] = None,
        request_proxies: Optional[dict] = None
    ) -> Generator[str, None, None]:
        request_headers = self.base_headers.copy()
        request_headers["referer"] = f"https://nemotron.one/chat/{payload['model']}"
        original_proxies = self.session.proxies.copy()
        if request_proxies is not None:
            self.session.proxies.update(request_proxies)
        elif not self.session.proxies:
            pass
        else:
            self.session.proxies = {}
        try:
            if stream:
                with self.session.post(
                    self.API_BASE_URL,
                    headers=request_headers,
                    json=payload,
                    stream=True,
                    timeout=request_timeout if request_timeout is not None else self.timeout
                ) as response:
                    response.raise_for_status()
                    yield from sanitize_stream(
                        response.iter_content(chunk_size=1024),
                        to_json=False,
                    )
            else:
                response = self.session.post(
                    self.API_BASE_URL,
                    headers=request_headers,
                    json=payload,
                    timeout=request_timeout if request_timeout is not None else self.timeout
                )
                response.raise_for_status()
                yield response.text
        except requests.exceptions.RequestException as e:
            raise exceptions.ProviderConnectionError(f"NEMOTRON API Connection error: {str(e)}")
        except Exception as e:
            raise RuntimeError(f"NEMOTRON API request unexpected error: {str(e)}")
        finally:
            self.session.proxies = original_proxies

    @property
    def models(self):
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()
```
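For orientation, a minimal usage sketch of the provider shown above. Assumptions not taken from the diff: that the class is importable from this module path in the installed package, that the nemotron.one endpoint is reachable, and the example prompt itself; this is a sketch of the call pattern, not a verified workflow.

```python
# Minimal usage sketch (assumed import path and reachable endpoint).
from webscout.Provider.OPENAI.NEMOTRON import NEMOTRON

client = NEMOTRON()

# create() always returns a full ChatCompletion; the stream argument is ignored.
response = client.chat.completions.create(
    model="nemotron70b",  # or "gpt4o"; unknown names fall back to "gpt4o"
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response.choices[0].message.content)
```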