webscout 8.3.6__py3-none-any.whl → 2025.10.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -58
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +33 -11
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -314
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -315
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -342
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -207
- webscout/Provider/Deepinfra.py +370 -369
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -387
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -162
- webscout/Provider/GeminiProxy.py +167 -166
- webscout/Provider/GithubChat.py +371 -370
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -379
- webscout/Provider/Jadve.py +282 -297
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +206 -384
- webscout/Provider/LambdaChat.py +423 -425
- webscout/Provider/Nemotron.py +244 -245
- webscout/Provider/Netwrck.py +248 -247
- webscout/Provider/OLLAMA.py +395 -394
- webscout/Provider/OPENAI/Cloudflare.py +394 -395
- webscout/Provider/OPENAI/FalconH1.py +452 -457
- webscout/Provider/OPENAI/FreeGemini.py +297 -299
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
- webscout/Provider/OPENAI/NEMOTRON.py +241 -244
- webscout/Provider/OPENAI/PI.py +428 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +466 -467
- webscout/Provider/OPENAI/__init__.py +33 -59
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +249 -269
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +593 -588
- webscout/Provider/OPENAI/chatgptclone.py +521 -524
- webscout/Provider/OPENAI/chatsandbox.py +202 -177
- webscout/Provider/OPENAI/deepinfra.py +319 -315
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -311
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -354
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -529
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -348
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +244 -496
- webscout/Provider/PI.py +405 -404
- webscout/Provider/Perplexitylabs.py +430 -431
- webscout/Provider/QwenLM.py +272 -254
- webscout/Provider/STT/__init__.py +32 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -12
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -10
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -236
- webscout/Provider/TextPollinationsAI.py +311 -318
- webscout/Provider/TogetherAI.py +356 -357
- webscout/Provider/TwoAI.py +313 -569
- webscout/Provider/TypliAI.py +312 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
- webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +251 -250
- webscout/Provider/VercelAI.py +256 -255
- webscout/Provider/WiseCat.py +232 -231
- webscout/Provider/WrDoChat.py +367 -366
- webscout/Provider/__init__.py +33 -86
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -334
- webscout/Provider/cerebras.py +446 -340
- webscout/Provider/chatglm.py +394 -214
- webscout/Provider/cleeai.py +211 -212
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -265
- webscout/Provider/julius.py +223 -222
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +384 -457
- webscout/Provider/searchchat.py +292 -291
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -364
- webscout/Provider/turboseek.py +274 -265
- webscout/Provider/typefully.py +208 -207
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +372 -371
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/GptOss.py +0 -207
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Kimi.py +0 -445
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/Qwen3.py +0 -304
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/copilot.py +0 -321
- webscout/Provider/OPENAI/gptoss.py +0 -288
- webscout/Provider/OPENAI/kimi.py +0 -469
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -441
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- webscout/tempid.py +0 -128
- webscout-8.3.6.dist-info/RECORD +0 -327
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
|
@@ -1,295 +1,295 @@
|
|
|
1
|
-
import os
|
|
2
|
-
import base64
|
|
3
|
-
import random
|
|
4
|
-
import json
|
|
5
|
-
from typing import Union, Dict, Any, Optional, Generator
|
|
6
|
-
from urllib import response
|
|
7
|
-
|
|
8
|
-
from curl_cffi import CurlError
|
|
9
|
-
from curl_cffi.requests import Session
|
|
10
|
-
from curl_cffi.const import CurlHttpVersion
|
|
11
|
-
|
|
12
|
-
from webscout.AIutel import Optimizers
|
|
13
|
-
from webscout.AIutel import Conversation
|
|
14
|
-
from webscout.AIutel import AwesomePrompts
|
|
15
|
-
from webscout.AIbase import Provider
|
|
16
|
-
from webscout import exceptions
|
|
17
|
-
from webscout.litagent import LitAgent
|
|
18
|
-
|
|
19
|
-
class GizAI(Provider):
    """
    A class to interact with the GizAI API.

    Attributes:
        system_prompt (str): The system prompt to define the assistant's role.

    Examples:
        >>> from webscout.Provider.GizAI import GizAI
        >>> ai = GizAI()
        >>> response = ai.chat("What's the weather today?")
        >>> print(response)
    """

    AVAILABLE_MODELS = [
        "azure-gpt-4-1",
        "chat-gpt4",
        "chat-grok-2",
        "chat-o4-mini",
        "chat-o4-mini-high",
        "chat-o4-mini-medium",
        "claude-haiku",
        "claude-sonnet",
        "deepinfra-llama-4-maverick",
        "deepseek",
        "deepseek-r1-distill-llama-70b",
        "gemini-2.0-flash-lite",
        "gemini-2.5-flash",
        "gemini-2.5-pro",
        "gpt-4-1-mini",
        "gpt-4-1-nano",
        "gpt-4o-image",
        "hyperbolic-deepseek-r1",
        "llama-3-70b",
        "llama-4-scout",
        "o3",
        "phi-4",
        "qwq-32b"
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "gemini-2.0-flash-lite",
        system_prompt: str = "You are a helpful assistant."
    ):
        """Initializes the GizAI API client.

        Args:
            is_conversation (bool): Whether to maintain conversation history.
            max_tokens (int): Maximum tokens to sample in a response.
            timeout (int): Request timeout in seconds.
            intro (str): Optional introductory/system text for the conversation.
            filepath (str): Optional path for persisting conversation history.
            update_file (bool): Whether to update the history file on each turn.
            proxies (dict): Optional proxy mapping for the HTTP session.
            history_offset (int): Maximum characters of history to retain.
            act (str): Optional AwesomePrompts persona key.
            model (str): One of AVAILABLE_MODELS.
            system_prompt (str): The system prompt for the assistant.

        Raises:
            ValueError: If ``model`` is not one of AVAILABLE_MODELS.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        self.api_url = "https://app.giz.ai/api/data/users/inferenceServer.infer"

        # Initialize LitAgent for user-agent generation
        self.agent = LitAgent()

        # Initialize curl_cffi Session
        self.session = Session()

        # Set up the headers
        self.headers = {
            "accept": "application/json, text/plain, */*",
            "content-type": "application/json",
            "user-agent": self.agent.random(),
            "origin": "https://app.giz.ai",
            "referer": "https://app.giz.ai/",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin"
        }

        # Update session headers and proxies.
        # Avoid a shared mutable default argument: fall back to a fresh dict.
        self.session.headers.update(self.headers)
        self.session.proxies = proxies if proxies is not None else {}

        # Store configuration
        self.system_prompt = system_prompt
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model

        # Materialize as a tuple: a generator expression would be exhausted
        # after the first membership test, silently rejecting valid optimizers
        # on subsequent ask() calls (and rendering uselessly in error messages).
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def _generate_id(self, length: int = 21) -> str:
        """Generates a random URL-safe base64 string of the given length."""
        random_bytes = os.urandom(length * 2)  # Over-generate so the slice is always full length
        b64_encoded = base64.urlsafe_b64encode(random_bytes).decode('utf-8')
        return b64_encoded[:length]

    def _get_random_ip(self) -> str:
        """Generates a random IPv4 address string (used to vary x-forwarded-for)."""
        return f"{random.randint(0, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}"

    def ask(
        self,
        prompt: str,
        stream: bool = False,  # Parameter kept for compatibility but not used
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any]:
        """
        Sends a prompt to the GizAI API and returns the response.

        Args:
            prompt (str): The prompt to send to the API.
            stream (bool): Not supported by GizAI, kept for compatibility.
            raw (bool): If True, return the raw response text instead of a dict.
            optimizer (str): Optimizer to use for the prompt.
            conversationally (bool): Whether to generate the prompt conversationally.

        Returns:
            Dict[str, Any]: ``{"text": content}``, or the raw content string when ``raw``.

        Raises:
            Exception: If ``optimizer`` is not a recognized optimizer name.
            exceptions.FailedToGenerateResponseError: On transport or HTTP failure.

        Examples:
            >>> ai = GizAI()
            >>> response = ai.ask("Tell me a joke!")
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Generate random IDs for request
        instance_id = self._generate_id()
        subscribe_id = self._generate_id()
        x_forwarded_for = self._get_random_ip()

        # Set up request body - GizAI doesn't support streaming
        request_body = {
            "model": "chat",
            "baseModel": self.model,  # Use the specific model ID here
            "input": {
                "messages": [{
                    "type": "human",
                    "content": conversation_prompt
                }],
                "mode": "plan"
            },
            "noStream": True,
            "instanceId": instance_id,
            "subscribeId": subscribe_id
        }

        # Combine default headers with the dynamic x-forwarded-for header
        request_headers = {**self.headers, "x-forwarded-for": x_forwarded_for}

        try:
            # Use curl_cffi session post with impersonate
            response = self.session.post(
                self.api_url,
                headers=request_headers,
                json=request_body,
                timeout=self.timeout,
                impersonate="chrome120",  # Use a common impersonation profile
                http_version=CurlHttpVersion.V2_0  # Use HTTP/2
            )
            response.raise_for_status()  # Check for HTTP errors

            # Process the response
            try:
                response_json = response.json()
                # GizAI responses have "status" and "output" fields
                if response_json.get("status") == "completed" and "output" in response_json:
                    content = response_json["output"]
                else:
                    content = ""
                    # Try to extract content from any available field that might contain the response
                    for key, value in response_json.items():
                        if isinstance(value, str) and len(value) > 10:
                            content = value
                            break
            except json.JSONDecodeError:
                # Handle case where response is not valid JSON
                content = response.text

            # Update conversation history
            self.last_response = {"text": content}
            self.conversation.update_chat_history(prompt, content)

            return self.last_response if not raw else content

        except CurlError as e:
            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}")
        except Exception as e:
            # Only include response body text when the exception actually carries one;
            # otherwise keep the suffix empty instead of formatting None into the message.
            error_text = getattr(getattr(e, 'response', None), 'text', '') or ''
            raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {error_text}")

    def chat(
        self,
        prompt: str,
        stream: bool = False,  # Parameter kept for compatibility but not used
        optimizer: str = None,
        conversationally: bool = False,
    ) -> 'Union[str, Generator[str, None, None]]':
        """
        Generates a response from the GizAI API.

        Args:
            prompt (str): The prompt to send to the API.
            stream (bool): If True, return a single-item generator; GizAI itself
                does not stream, so this only changes the return shape.
            optimizer (str): Optimizer to use for the prompt.
            conversationally (bool): Whether to generate the prompt conversationally.

        Returns:
            Union[str, Generator[str, None, None]]: The response text, or a
            generator yielding it once when ``stream`` is True.

        Examples:
            >>> ai = GizAI()
            >>> response = ai.chat("What's the weather today?")
        """
        # NOTE: this must NOT be a generator function itself. The previous
        # implementation contained a bare `yield`, so with stream=False the
        # `return result` produced an *empty* generator and callers never saw
        # the text. Returning a plain string (or an explicit inner generator
        # when stream=True) fixes that while keeping stream=True iterable.
        response_data = self.ask(
            prompt, stream=False, raw=False,
            optimizer=optimizer, conversationally=conversationally
        )
        result = self.get_message(response_data)
        if stream:
            def _single() -> 'Generator[str, None, None]':
                yield result
            return _single()
        return result

    def get_message(self, response: Union[dict, str]) -> str:
        """
        Extracts the message from the API response.

        Args:
            response (Union[dict, str]): The API response.

        Returns:
            str: The message content ("" when the dict has no "text" key).

        Examples:
            >>> ai = GizAI()
            >>> response = ai.ask("Tell me a joke!")
            >>> message = ai.get_message(response)
        """
        if isinstance(response, str):
            return response
        assert isinstance(response, dict), "Response should be either dict or str"
        return response.get("text", "")
|
290
|
-
|
|
291
|
-
if __name__ == "__main__":
|
|
292
|
-
ai = GizAI()
|
|
293
|
-
response = ai.chat("Hello, how are you?", stream=True)
|
|
294
|
-
for chunk in response:
|
|
1
|
+
import os
|
|
2
|
+
import base64
|
|
3
|
+
import random
|
|
4
|
+
import json
|
|
5
|
+
from typing import Union, Dict, Any, Optional, Generator
|
|
6
|
+
from urllib import response
|
|
7
|
+
|
|
8
|
+
from curl_cffi import CurlError
|
|
9
|
+
from curl_cffi.requests import Session
|
|
10
|
+
from curl_cffi.const import CurlHttpVersion
|
|
11
|
+
|
|
12
|
+
from webscout.AIutel import Optimizers
|
|
13
|
+
from webscout.AIutel import Conversation
|
|
14
|
+
from webscout.AIutel import AwesomePrompts
|
|
15
|
+
from webscout.AIbase import Provider
|
|
16
|
+
from webscout import exceptions
|
|
17
|
+
from webscout.litagent import LitAgent
|
|
18
|
+
|
|
19
|
+
class GizAI(Provider):
|
|
20
|
+
"""
|
|
21
|
+
A class to interact with the GizAI API.
|
|
22
|
+
|
|
23
|
+
Attributes:
|
|
24
|
+
system_prompt (str): The system prompt to define the assistant's role.
|
|
25
|
+
|
|
26
|
+
Examples:
|
|
27
|
+
>>> from webscout.Provider.GizAI import GizAI
|
|
28
|
+
>>> ai = GizAI()
|
|
29
|
+
>>> response = ai.chat("What's the weather today?")
|
|
30
|
+
>>> print(response)
|
|
31
|
+
"""
|
|
32
|
+
required_auth = False
|
|
33
|
+
AVAILABLE_MODELS = [
|
|
34
|
+
"azure-gpt-4-1",
|
|
35
|
+
"chat-gpt4",
|
|
36
|
+
"chat-grok-2",
|
|
37
|
+
"chat-o4-mini",
|
|
38
|
+
"chat-o4-mini-high",
|
|
39
|
+
"chat-o4-mini-medium",
|
|
40
|
+
"claude-haiku",
|
|
41
|
+
"claude-sonnet",
|
|
42
|
+
"deepinfra-llama-4-maverick",
|
|
43
|
+
"deepseek",
|
|
44
|
+
"deepseek-r1-distill-llama-70b",
|
|
45
|
+
"gemini-2.0-flash-lite",
|
|
46
|
+
"gemini-2.5-flash",
|
|
47
|
+
"gemini-2.5-pro",
|
|
48
|
+
"gpt-4-1-mini",
|
|
49
|
+
"gpt-4-1-nano",
|
|
50
|
+
"gpt-4o-image",
|
|
51
|
+
"hyperbolic-deepseek-r1",
|
|
52
|
+
"llama-3-70b",
|
|
53
|
+
"llama-4-scout",
|
|
54
|
+
"o3",
|
|
55
|
+
"phi-4",
|
|
56
|
+
"qwq-32b"
|
|
57
|
+
]
|
|
58
|
+
|
|
59
|
+
def __init__(
|
|
60
|
+
self,
|
|
61
|
+
is_conversation: bool = True,
|
|
62
|
+
max_tokens: int = 2049,
|
|
63
|
+
timeout: int = 30,
|
|
64
|
+
intro: str = None,
|
|
65
|
+
filepath: str = None,
|
|
66
|
+
update_file: bool = True,
|
|
67
|
+
proxies: dict = {},
|
|
68
|
+
history_offset: int = 10250,
|
|
69
|
+
act: str = None,
|
|
70
|
+
model: str = "gemini-2.0-flash-lite",
|
|
71
|
+
system_prompt: str = "You are a helpful assistant."
|
|
72
|
+
):
|
|
73
|
+
"""Initializes the GizAI API client."""
|
|
74
|
+
if model not in self.AVAILABLE_MODELS:
|
|
75
|
+
raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
|
|
76
|
+
|
|
77
|
+
self.api_url = "https://app.giz.ai/api/data/users/inferenceServer.infer"
|
|
78
|
+
|
|
79
|
+
# Initialize LitAgent for user-agent generation
|
|
80
|
+
self.agent = LitAgent()
|
|
81
|
+
|
|
82
|
+
# Initialize curl_cffi Session
|
|
83
|
+
self.session = Session()
|
|
84
|
+
|
|
85
|
+
# Set up the headers
|
|
86
|
+
self.headers = {
|
|
87
|
+
"accept": "application/json, text/plain, */*",
|
|
88
|
+
"content-type": "application/json",
|
|
89
|
+
"user-agent": self.agent.random(),
|
|
90
|
+
"origin": "https://app.giz.ai",
|
|
91
|
+
"referer": "https://app.giz.ai/",
|
|
92
|
+
"sec-fetch-dest": "empty",
|
|
93
|
+
"sec-fetch-mode": "cors",
|
|
94
|
+
"sec-fetch-site": "same-origin"
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
# Update session headers and proxies
|
|
98
|
+
self.session.headers.update(self.headers)
|
|
99
|
+
self.session.proxies = proxies
|
|
100
|
+
|
|
101
|
+
# Store configuration
|
|
102
|
+
self.system_prompt = system_prompt
|
|
103
|
+
self.is_conversation = is_conversation
|
|
104
|
+
self.max_tokens_to_sample = max_tokens
|
|
105
|
+
self.timeout = timeout
|
|
106
|
+
self.last_response = {}
|
|
107
|
+
self.model = model
|
|
108
|
+
|
|
109
|
+
self.__available_optimizers = (
|
|
110
|
+
method
|
|
111
|
+
for method in dir(Optimizers)
|
|
112
|
+
if callable(getattr(Optimizers, method)) and not method.startswith("__")
|
|
113
|
+
)
|
|
114
|
+
|
|
115
|
+
Conversation.intro = (
|
|
116
|
+
AwesomePrompts().get_act(
|
|
117
|
+
act, raise_not_found=True, default=None, case_insensitive=True
|
|
118
|
+
)
|
|
119
|
+
if act
|
|
120
|
+
else intro or Conversation.intro
|
|
121
|
+
)
|
|
122
|
+
|
|
123
|
+
self.conversation = Conversation(
|
|
124
|
+
is_conversation, self.max_tokens_to_sample, filepath, update_file
|
|
125
|
+
)
|
|
126
|
+
self.conversation.history_offset = history_offset
|
|
127
|
+
|
|
128
|
+
def _generate_id(self, length: int = 21) -> str:
|
|
129
|
+
"""Generates a random URL-safe base64 string."""
|
|
130
|
+
random_bytes = os.urandom(length * 2) # Generate more bytes initially
|
|
131
|
+
b64_encoded = base64.urlsafe_b64encode(random_bytes).decode('utf-8')
|
|
132
|
+
return b64_encoded[:length]
|
|
133
|
+
|
|
134
|
+
def _get_random_ip(self) -> str:
|
|
135
|
+
"""Generates a random IPv4 address string."""
|
|
136
|
+
return f"{random.randint(0, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
|
|
137
|
+
|
|
138
|
+
def ask(
    self,
    prompt: str,
    stream: bool = False,  # Parameter kept for compatibility but not used
    raw: bool = False,
    optimizer: str = None,
    conversationally: bool = False,
) -> Dict[str, Any]:
    """
    Sends a prompt to the GizAI API and returns the response.

    Args:
        prompt (str): The prompt to send to the API.
        stream (bool): Not supported by GizAI, kept for compatibility.
        raw (bool): Whether to return the raw response. NOTE(review):
            when True the bare content string is returned instead of the
            annotated Dict — callers should expect ``str`` in that case.
        optimizer (str): Optimizer to use for the prompt.
        conversationally (bool): Whether to generate the prompt conversationally.

    Returns:
        Dict[str, Any]: The API response (``{"text": content}``), or the
        bare content string when ``raw=True``.

    Raises:
        Exception: If ``optimizer`` is not one of the available optimizers.
        exceptions.FailedToGenerateResponseError: On any transport or HTTP
            failure.

    Examples:
        >>> ai = GizAI()
        >>> response = ai.ask("Tell me a joke!")
    """
    # Build the full prompt (history + current message) from the conversation.
    conversation_prompt = self.conversation.gen_complete_prompt(prompt)
    if optimizer:
        if optimizer in self.__available_optimizers:
            # Optimizers rewrite either the full conversation prompt or
            # just the bare prompt, depending on `conversationally`.
            conversation_prompt = getattr(Optimizers, optimizer)(
                conversation_prompt if conversationally else prompt
            )
        else:
            raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

    # Generate random IDs for request — fresh per call, mimicking the
    # web frontend's per-request instance/subscribe identifiers.
    instance_id = self._generate_id()
    subscribe_id = self._generate_id()
    x_forwarded_for = self._get_random_ip()

    # Set up request body - GizAI doesn't support streaming
    request_body = {
        "model": "chat",
        "baseModel": self.model,  # Use the specific model ID here
        "input": {
            "messages": [{
                "type": "human",
                "content": conversation_prompt
            }],
            # NOTE(review): "plan" mode is what the web app sends — assumed
            # required by the endpoint; confirm against the GizAI frontend.
            "mode": "plan"
        },
        "noStream": True,
        "instanceId": instance_id,
        "subscribeId": subscribe_id
    }

    # Combine default headers with the dynamic x-forwarded-for header
    request_headers = {**self.headers, "x-forwarded-for": x_forwarded_for}

    try:
        # Use curl_cffi session post with impersonate
        response = self.session.post(
            self.api_url,
            headers=request_headers,
            json=request_body,
            timeout=self.timeout,
            impersonate="chrome120",  # Use a common impersonation profile
            http_version=CurlHttpVersion.V2_0  # Use HTTP/2
        )
        response.raise_for_status()  # Check for HTTP errors

        # Process the response
        try:
            response_json = response.json()
            # GizAI responses have "status" and "output" fields
            if response_json.get("status") == "completed" and "output" in response_json:
                content = response_json["output"]
            else:
                content = ""
                # Fallback: try to extract content from any available field
                # that might contain the response (first string value longer
                # than 10 chars wins — heuristic, order depends on the JSON).
                for key, value in response_json.items():
                    if isinstance(value, str) and len(value) > 10:
                        content = value
                        break
        except json.JSONDecodeError:
            # Handle case where response is not valid JSON — fall back to
            # the raw response body text.
            content = response.text

        # Update conversation history
        self.last_response = {"text": content}
        self.conversation.update_chat_history(prompt, content)

        # raw=True returns the bare string; otherwise the dict wrapper.
        return self.last_response if not raw else content

    except CurlError as e:
        # Transport-level failure (DNS, TLS, connection reset, ...).
        raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}")
    except Exception as e:
        # HTTP/parse failure — include the response body when available.
        error_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
        raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {error_text}")
|
|
236
|
+
|
|
237
|
+
def chat(
    self,
    prompt: str,
    stream: bool = False,  # Parameter kept for compatibility but not used
    optimizer: str = None,
    conversationally: bool = False,
) -> Union[str, 'Generator[str, None, None]']:
    """
    Generates a response from the GizAI API.

    BUG FIX: the previous implementation contained a ``yield``, which made
    the whole function a generator. With ``stream=False`` its ``return
    result`` only set the generator's StopIteration value, so callers
    received a generator that yielded nothing and the result was silently
    lost. Now ``stream=False`` returns the text directly, and
    ``stream=True`` returns a one-shot generator (unchanged behavior for
    the streaming path).

    Args:
        prompt (str): The prompt to send to the API.
        stream (bool): Not supported by GizAI; when True the full response
            is yielded as a single chunk for API compatibility.
        optimizer (str): Optimizer to use for the prompt.
        conversationally (bool): Whether to generate the prompt conversationally.

    Returns:
        Union[str, Generator[str, None, None]]: The response text, or a
        generator yielding it once when ``stream=True``.

    Examples:
        >>> ai = GizAI()
        >>> response = ai.chat("What's the weather today?")
    """
    # GizAI doesn't support streaming, so the request is always non-streamed.
    response_data = self.ask(
        prompt, stream=False, raw=False,
        optimizer=optimizer, conversationally=conversationally
    )
    result = self.get_message(response_data)

    if stream:
        # Wrap the complete response in a one-shot generator so streaming
        # callers can still iterate over "chunks".
        def _single_chunk() -> 'Generator[str, None, None]':
            yield result
        return _single_chunk()
    return result
|
|
270
|
+
|
|
271
|
+
def get_message(self, response: Union[dict, str]) -> str:
    """
    Extracts the message text from an API response.

    Args:
        response (Union[dict, str]): Either a raw string (``raw=True``
            responses) or the ``{"text": ...}`` dict produced by ``ask``.

    Returns:
        str: The message content; ``""`` when the dict has no "text" key.

    Examples:
        >>> ai = GizAI()
        >>> response = ai.ask("Tell me a joke!")
        >>> message = ai.get_message(response)
    """
    # Anything that is not already a plain string must be the dict
    # shape produced by ask().
    if not isinstance(response, str):
        assert isinstance(response, dict), "Response should be either dict or str"
        return response.get("text", "")
    return response
|
|
290
|
+
|
|
291
|
+
if __name__ == "__main__":
    # Manual smoke test: stream a single reply to stdout.
    bot = GizAI()
    for piece in bot.chat("Hello, how are you?", stream=True):
        print(piece, end="", flush=True)
|