webscout 8.3.7-py3-none-any.whl → 2025.10.11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
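A comparison like the one below can also be reproduced locally. The following is a minimal sketch, assuming both releases are still downloadable from PyPI and that only the file listings (not per-file diffs) are of interest; the download directory names are arbitrary.

```python
# Minimal sketch: download both wheels (no dependencies) and compare their file lists.
# Assumes network access and that both releases are still available on PyPI.
import pathlib
import subprocess
import zipfile

def download_wheel(spec: str, dest: str) -> pathlib.Path:
    """Fetch a single binary wheel matching `spec` (e.g. 'webscout==8.3.7') into `dest`."""
    subprocess.run(
        ["pip", "download", spec, "--no-deps", "--only-binary", ":all:", "-d", dest],
        check=True,
    )
    return next(pathlib.Path(dest).glob("*.whl"))

old_whl = download_wheel("webscout==8.3.7", "whl_old")
new_whl = download_wheel("webscout==2025.10.11", "whl_new")

old_files = set(zipfile.ZipFile(old_whl).namelist())
new_files = set(zipfile.ZipFile(new_whl).namelist())

print("removed files:", *sorted(old_files - new_files), sep="\n  ")
print("added files:  ", *sorted(new_files - old_files), sep="\n  ")
```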
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -60
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +16 -1
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -316
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -314
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -343
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -208
- webscout/Provider/Deepinfra.py +370 -366
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -308
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -164
- webscout/Provider/GeminiProxy.py +167 -167
- webscout/Provider/GithubChat.py +371 -372
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -383
- webscout/Provider/Jadve.py +282 -282
- webscout/Provider/K2Think.py +307 -307
- webscout/Provider/Koboldai.py +205 -205
- webscout/Provider/LambdaChat.py +423 -423
- webscout/Provider/Nemotron.py +244 -244
- webscout/Provider/Netwrck.py +248 -248
- webscout/Provider/OLLAMA.py +395 -395
- webscout/Provider/OPENAI/Cloudflare.py +393 -393
- webscout/Provider/OPENAI/FalconH1.py +451 -451
- webscout/Provider/OPENAI/FreeGemini.py +296 -296
- webscout/Provider/OPENAI/K2Think.py +431 -431
- webscout/Provider/OPENAI/NEMOTRON.py +240 -240
- webscout/Provider/OPENAI/PI.py +427 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +465 -465
- webscout/Provider/OPENAI/__init__.py +33 -18
- webscout/Provider/OPENAI/base.py +248 -248
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +592 -592
- webscout/Provider/OPENAI/chatgptclone.py +521 -521
- webscout/Provider/OPENAI/chatsandbox.py +202 -202
- webscout/Provider/OPENAI/deepinfra.py +318 -314
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -314
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -355
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -535
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -340
- webscout/Provider/OPENAI/toolbaz.py +419 -416
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +243 -243
- webscout/Provider/PI.py +405 -405
- webscout/Provider/Perplexitylabs.py +430 -430
- webscout/Provider/QwenLM.py +272 -272
- webscout/Provider/STT/__init__.py +16 -1
- webscout/Provider/Sambanova.py +257 -257
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -18
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -18
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -237
- webscout/Provider/TextPollinationsAI.py +310 -310
- webscout/Provider/TogetherAI.py +356 -356
- webscout/Provider/TwoAI.py +312 -312
- webscout/Provider/TypliAI.py +311 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/UNFINISHED/GizAI.py +294 -294
- webscout/Provider/UNFINISHED/Marcus.py +198 -198
- webscout/Provider/UNFINISHED/Qodo.py +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/UNFINISHED/XenAI.py +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +250 -250
- webscout/Provider/VercelAI.py +256 -256
- webscout/Provider/WiseCat.py +231 -231
- webscout/Provider/WrDoChat.py +366 -366
- webscout/Provider/__init__.py +33 -18
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -331
- webscout/Provider/cerebras.py +446 -446
- webscout/Provider/chatglm.py +394 -301
- webscout/Provider/cleeai.py +211 -211
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -263
- webscout/Provider/julius.py +223 -223
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +383 -383
- webscout/Provider/searchchat.py +292 -292
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -367
- webscout/Provider/turboseek.py +273 -273
- webscout/Provider/typefully.py +207 -207
- webscout/Provider/yep.py +372 -372
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -937
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/OPENAI/Qwen3.py +0 -303
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/tempid.py +0 -134
- webscout-8.3.7.dist-info/RECORD +0 -301
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
webscout/Provider/Nemotron.py
CHANGED
@@ -1,245 +1,245 @@

The hunk re-emits the whole file: lines 1-244 are shown as removed and then re-added, and the removed and re-added text is identical as rendered here (most likely a whitespace or line-ending-only change), with only line 245, the final print call, appearing as unchanged context. The file body is therefore reproduced once:

import requests
import random
import datetime
from typing import Any, Dict, Union, Generator
from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions

class NEMOTRON(Provider):
    """NEMOTRON provider for interacting with the nemotron.one API."""
    url = "https://nemotron.one/api/chat"
    required_auth = False
    AVAILABLE_MODELS = [
        "gpt4o",
        "nemotron70b",
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 8000,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "gpt4o"
    ):
        """Initialize NEMOTRON with configuration options."""
        self.session = requests.Session()
        self.max_tokens = max_tokens
        self.is_conversation = is_conversation
        self.timeout = timeout
        self.last_response = {}
        self.model = self.get_model(model)

        self.headers = {
            "authority": "nemotron.one",
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "origin": "https://nemotron.one",
            "referer": f"https://nemotron.one/chat/{self.model}",
            "sec-ch-ua": '"Chromium";v="136", "Not.A/Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36"
        }

        self.__available_optimizers = (
            method for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    @staticmethod
    def _generate_random_email() -> str:
        """Generate a random email address."""
        random_letter = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        random_string = ''.join(random.choice(random_letter) for _ in range(10))
        return f"{random_string}@gmail.com"

    @staticmethod
    def _generate_random_id() -> str:
        """Generate a random user ID."""
        timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
        random_letter = "abcdefghijklmnopqrstuvwxyz0123456789"
        random_string = ''.join(random.choice(random_letter) for _ in range(8))
        return f"cm{random_string}{timestamp[:10]}"

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve model name from alias."""
        if model in cls.AVAILABLE_MODELS:
            return model  # Simply return the model name if it's in the list
        raise ValueError(f"Unknown model: {model}. Available models: {', '.join(cls.AVAILABLE_MODELS)}")

    def _get_user_data(self) -> Dict[str, Any]:
        """Generate user data for the request."""
        current_time = datetime.datetime.now().isoformat()
        return {
            "name": "user",
            "email": self._generate_random_email(),
            "image": "https://lh3.googleusercontent.com/a/default-user=s96-c",
            "id": self._generate_random_id(),
            "password": None,
            "emailVerified": None,
            "credits": 100000000000,
            "isPro": False,
            "createdAt": current_time,
            "updatedAt": current_time
        }

    def _make_request(
        self,
        message: str,
        stream: bool = False,
        raw: bool = False
    ) -> Generator[str, None, None]:
        """Make request to NEMOTRON API."""
        payload = {
            "content": message,
            "imageSrc": "",
            "model": self.model,
            "user": self._get_user_data(),
            "conversationId": ""
        }

        try:
            if stream:
                with self.session.post(
                    self.url,
                    headers=self.headers,
                    json=payload,
                    stream=True,
                    timeout=self.timeout
                ) as response:
                    response.raise_for_status()
                    buffer = ""
                    chunk_size = 32
                    for chunk in response.iter_content(chunk_size=chunk_size):
                        if not chunk:
                            continue
                        text = chunk.decode(errors="ignore")
                        buffer += text
                        while len(buffer) >= chunk_size:
                            out = buffer[:chunk_size]
                            buffer = buffer[chunk_size:]
                            if out.strip():
                                if raw:
                                    yield out
                                else:
                                    yield out
                    if buffer.strip():
                        if raw:
                            yield buffer
                        else:
                            yield buffer
            else:
                response = self.session.post(
                    self.url,
                    headers=self.headers,
                    json=payload,
                    timeout=self.timeout
                )
                response.raise_for_status()
                if raw:
                    yield response.text
                else:
                    yield response.text

        except requests.exceptions.RequestException as e:
            raise exceptions.ProviderConnectionError(f"Connection error: {str(e)}")

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, str], Generator[Dict[str, str], None, None]]:
        """Send a prompt to NEMOTRON API and return the response."""
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise ValueError(f"Optimizer is not one of {self.__available_optimizers}")

        def for_stream():
            for text in self._make_request(conversation_prompt, stream=True, raw=raw):
                if raw:
                    yield text
                else:
                    yield {"text": text}

        def for_non_stream():
            response_text = next(self._make_request(conversation_prompt, stream=False, raw=raw))
            if raw:
                self.last_response = response_text
                return response_text
            else:
                self.last_response = {"text": response_text}
                return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Generate response as string."""
        def for_stream():
            for response in self.ask(
                prompt,
                stream=True,
                optimizer=optimizer,
                conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    stream=False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: Dict[str, Any]) -> str:
        """Extract message from response dictionary."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]

if __name__ == "__main__":
    # Example usage
    nemotron = NEMOTRON()
    response = nemotron.chat("write me about humans in points", stream=True)
    for part in response:
        print(part, end="", flush=True)
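The __main__ block above demonstrates the streaming path; a non-streaming call goes through the same ask()/get_message() pair. A minimal usage sketch follows, assuming the class is importable from its file location (webscout/Provider/Nemotron.py); adjust the import if the package re-exports it elsewhere.

```python
# Usage sketch for the non-streaming path of the NEMOTRON provider shown above.
# Import path is assumed from the file location webscout/Provider/Nemotron.py.
from webscout.Provider.Nemotron import NEMOTRON

bot = NEMOTRON(model="nemotron70b", timeout=60)

# chat() with stream=False returns a plain string.
print(bot.chat("List three uses of regular expressions."))

# ask() with stream=False returns the underlying dict, e.g. {"text": "..."};
# passing raw=True yields the raw response body instead.
result = bot.ask("Hello!", stream=False)
print(result["text"])
```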