webscout 8.2.7-py3-none-any.whl → 8.2.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -249
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Litlogger/__init__.py +67 -0
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +23 -0
- webscout/Litlogger/core/logger.py +165 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +33 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +173 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +249 -0
- webscout/Litlogger/styles/formats.py +458 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +153 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +359 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +228 -0
- webscout/Provider/AISEARCH/genspark_search.py +350 -0
- webscout/Provider/AISEARCH/hika_search.py +198 -0
- webscout/Provider/AISEARCH/iask_search.py +436 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +324 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +673 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +370 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +285 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +36 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +488 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +319 -0
- webscout/Provider/OPENAI/e2b.py +1356 -0
- webscout/Provider/OPENAI/exaai.py +411 -0
- webscout/Provider/OPENAI/exachat.py +443 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +323 -0
- webscout/Provider/OPENAI/groq.py +361 -0
- webscout/Provider/OPENAI/heckai.py +307 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +383 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +356 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/scirachat.py +471 -0
- webscout/Provider/OPENAI/sonus.py +303 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -0
- webscout/Provider/OPENAI/uncovrAI.py +462 -0
- webscout/Provider/OPENAI/utils.py +307 -0
- webscout/Provider/OPENAI/venice.py +425 -0
- webscout/Provider/OPENAI/wisecat.py +381 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +378 -0
- webscout/Provider/OPENAI/yep.py +356 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/ImgSys/__init__.py +23 -0
- webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/__init__.py +12 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +155 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/piclumen/__init__.py +23 -0
- webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
- webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +9 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +280 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +172 -0
- webscout/Provider/ai4chat.py +149 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/scout/README.md +402 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +140 -0
- webscout/scout/core/scout.py +568 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +460 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +262 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
- webscout-8.2.8.dist-info/RECORD +334 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
webscout/Provider/geminiapi.py (new file)
@@ -0,0 +1,208 @@
"""
Install the Google AI Python SDK

$ pip install google-generativeai
"""

import os
import google.generativeai as genai

from google.generativeai.types import HarmCategory, HarmBlockThreshold
import requests
from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider


class GEMINIAPI(Provider):
    """
    A class to interact with the Gemini API using the google-generativeai library.
    """

    def __init__(
        self,
        api_key,
        model_name: str = "gemini-1.5-flash-latest",
        temperature: float = 1,
        top_p: float = 0.95,
        top_k: int = 64,
        max_output_tokens: int = 8192,
        is_conversation: bool = True,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        system_instruction: str = "You are a helpful and informative AI assistant.",
        safety_settings: dict = None,
    ):
        """
        Initializes the Gemini API with the given parameters.

        Args:
            api_key (str, optional): Your Gemini API key. If None, it will use the environment variable "GEMINI_API_KEY".
                Defaults to None.
            model_name (str, optional): The name of the Gemini model to use.
                Defaults to "gemini-1.5-flash-exp-0827".
            temperature (float, optional): The temperature parameter for the model. Defaults to 1.
            top_p (float, optional): The top_p parameter for the model. Defaults to 0.95.
            top_k (int, optional): The top_k parameter for the model. Defaults to 64.
            max_output_tokens (int, optional): The maximum number of output tokens. Defaults to 8192.
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): Http request proxies. Defaults to {}.
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
            system_instruction (str, optional): System instruction to guide the AI's behavior.
                Defaults to "You are a helpful and informative AI assistant.".
        """
        self.api_key = api_key
        self.model_name = model_name
        self.temperature = temperature
        self.top_p = top_p
        self.top_k = top_k
        self.max_output_tokens = max_output_tokens
        self.system_instruction = system_instruction
        self.safety_settings = safety_settings if safety_settings else {}
        self.session = requests.Session() # Not directly used for Gemini API calls, but can be used for other requests
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_output_tokens
        self.timeout = timeout
        self.last_response = {}

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

        # Configure the Gemini API
        genai.configure(api_key=self.api_key)

        # Create the model with generation config
        self.generation_config = {
            "temperature": self.temperature,
            "top_p": self.top_p,
            "top_k": self.top_k,
            "max_output_tokens": self.max_output_tokens,
            "response_mime_type": "text/plain",
        }

        self.model = genai.GenerativeModel(
            model_name=self.model_name,
            generation_config=self.generation_config,
            safety_settings=self.safety_settings,
            system_instruction=self.system_instruction,
        )

        # Start the chat session
        self.chat_session = self.model.start_chat()

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> dict:
        """Chat with AI

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Not used for Gemini API. Defaults to False.
            raw (bool, optional): Not used for Gemini API. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            dict : {}
        ```json
        {
            "text" : "How may I assist you today?"
        }
        ```
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        # Send the message to the chat session and get the response
        response = self.chat_session.send_message(conversation_prompt)
        self.last_response.update(dict(text=response.text))
        self.conversation.update_chat_history(
            prompt, self.get_message(self.last_response)
        )
        return self.last_response

    def chat(
        self,
        prompt: str,
        stream: bool = False,  # Streaming not supported by the current google-generativeai library
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate response `str`

        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Not used for Gemini API. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            str: Response generated
        """
        return self.get_message(
            self.ask(
                prompt,
                optimizer=optimizer,
                conversationally=conversationally,
            )
        )

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
if __name__ == "__main__":
    safety_settings = {
        HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
    }
    ai = GEMINIAPI(api_key="", safety_settings=safety_settings)
    res = ai.chat(input(">>> "))
    for r in res:
        print(r, end="", flush=True)
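
Not part of the diff itself: below is a minimal usage sketch for the GEMINIAPI provider added above. It assumes google-generativeai is installed and a valid key is exported in a GEMINI_API_KEY environment variable (the variable name is illustrative); the import path follows the file listing.

# Hypothetical usage sketch for webscout/Provider/geminiapi.py (not part of the release).
import os
from webscout.Provider.geminiapi import GEMINIAPI

# Construct the provider with an explicit key; model_name and system_instruction
# mirror the defaults shown in the diff above.
gemini = GEMINIAPI(
    api_key=os.environ["GEMINI_API_KEY"],
    model_name="gemini-1.5-flash-latest",
    system_instruction="You are a concise assistant.",
)

# chat() returns the full response text as a plain string.
print(gemini.chat("Summarize what the webscout package does in one sentence."))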
webscout/Provider/granite.py (new file)
@@ -0,0 +1,235 @@
from curl_cffi.requests import Session
from curl_cffi import CurlError
import json
from typing import Optional, Union, Any, Dict, Generator

from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent as Lit

class IBMGranite(Provider):
    """
    A class to interact with the IBM Granite API (accessed via d18n68ssusgr7r.cloudfront.net)
    using Lit agent for the user agent.
    """

    AVAILABLE_MODELS = ["granite-3-8b-instruct", "granite-3-2-8b-instruct"]

    def __init__(
        self,
        api_key: str,
        is_conversation: bool = True,
        max_tokens: int = 600, # Note: max_tokens is not used by this API
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "granite-3-2-8b-instruct",
        system_prompt: str = "You are a helpful AI assistant.",
        thinking: bool = False,
    ):
        """Initializes the IBMGranite API client using Lit agent for the user agent."""
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

        # Initialize curl_cffi Session
        self.session = Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://d18n68ssusgr7r.cloudfront.net/v1/chat/completions"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.thinking = thinking

        # Use Lit agent (keep if needed for other headers or logic)
        self.headers = {
            "authority": "d18n68ssusgr7r.cloudfront.net", # Keep authority
            "accept": "application/json,application/jsonl", # Keep accept
            "content-type": "application/json",
            "origin": "https://www.ibm.com", # Keep origin
            "referer": "https://www.ibm.com/", # Keep referer
        }
        self.headers["Authorization"] = f"Bearer {api_key}"

        # Update curl_cffi session headers and proxies
        self.session.headers.update(self.headers)
        self.session.proxies = proxies # Assign proxies directly

        self.__available_optimizers = (
            method for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
        self.conversation.history_offset = history_offset

    @staticmethod
    def _granite_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extracts content from IBM Granite stream JSON lists [3, "text"]."""
        if isinstance(chunk, list) and len(chunk) == 2 and chunk[0] == 3 and isinstance(chunk[1], str):
            return chunk[1]
        return None

    def ask(
        self,
        prompt: str,
        stream: bool = False, # API supports streaming
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
        """Chat with AI
        Args:
            prompt (str): Prompt to be sent.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            Union[Dict, Generator[Dict, None, None]]: Response generated
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        payload = {
            "model": self.model,
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": conversation_prompt},
            ],
            "stream": True # API seems to require stream=True based on response format
        }

        def for_stream():
            streaming_text = "" # Initialize outside try block
            try:
                # Use curl_cffi session post with impersonate
                response = self.session.post(
                    self.api_endpoint,
                    # headers are set on the session
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome110" # Use a common impersonation profile
                )
                response.raise_for_status() # Check for HTTP errors

                # Use sanitize_stream
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None), # Pass byte iterator
                    intro_value=None, # No prefix
                    to_json=True, # Stream sends JSON lines (which are lists)
                    content_extractor=self._granite_extractor, # Use the specific extractor
                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
                )

                for content_chunk in processed_stream:
                    # content_chunk is the string extracted by _granite_extractor
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        resp = dict(text=content_chunk)
                        yield resp if not raw else content_chunk

                # Update history after stream finishes
                self.last_response = dict(text=streaming_text)
                self.conversation.update_chat_history(prompt, streaming_text)

            except CurlError as e: # Catch CurlError
                raise exceptions.ProviderConnectionError(f"Request failed (CurlError): {e}") from e
            except json.JSONDecodeError as e: # Keep specific JSON error handling
                raise exceptions.InvalidResponseError(f"Failed to decode JSON response: {e}") from e
            except Exception as e: # Catch other potential exceptions (like HTTPError)
                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                # Use specific exception type if available, otherwise generic
                ex_type = exceptions.FailedToGenerateResponseError if not isinstance(e, exceptions.ProviderConnectionError) else type(e)
                raise ex_type(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e


        def for_non_stream():
            # Aggregate the stream using the updated for_stream logic
            full_text = ""
            try:
                # Ensure raw=False so for_stream yields dicts
                for chunk_data in for_stream():
                    if isinstance(chunk_data, dict) and "text" in chunk_data:
                        full_text += chunk_data["text"]
                    # Handle raw string case if raw=True was passed
                    elif raw and isinstance(chunk_data, str):
                        full_text += chunk_data
            except Exception as e:
                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
                if not full_text:
                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e

            # last_response and history are updated within for_stream
            # Return the final aggregated response dict or raw string
            return full_text if raw else self.last_response


        # Since the API endpoint suggests streaming, always call the stream generator.
        # The non-stream wrapper will handle aggregation if stream=False.
        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Generate response as a string using chat method"""
        def for_stream_chat():
            # ask() yields dicts or strings when streaming
            gen = self.ask(
                prompt, stream=True, raw=False, # Ensure ask yields dicts
                optimizer=optimizer, conversationally=conversationally
            )
            for response_dict in gen:
                yield self.get_message(response_dict) # get_message expects dict

        def for_non_stream_chat():
            # ask() returns dict or str when not streaming
            response_data = self.ask(
                prompt, stream=False, raw=False, # Ensure ask returns dict
                optimizer=optimizer, conversationally=conversationally
            )
            return self.get_message(response_data) # get_message expects dict

        return for_stream_chat() if stream else for_non_stream_chat()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response"""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]

if __name__ == "__main__":
    # Ensure curl_cffi is installed
    from rich import print
    # Example usage: Initialize without logging.
    ai = IBMGranite(
        api_key="", # press f12 to see the API key
        thinking=True,
    )
    response = ai.chat("write a poem about AI", stream=True)
    for chunk in response:
        print(chunk, end="", flush=True)
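
Not part of the diff itself: below is a minimal streaming usage sketch for the IBMGranite provider added above. It assumes curl_cffi is installed and that a bearer token for the CloudFront endpoint is available; the GRANITE_API_KEY environment variable name is illustrative, and the import path follows the file listing.

# Hypothetical usage sketch for webscout/Provider/granite.py (not part of the release).
import os
from webscout.Provider.granite import IBMGranite

# Construct the provider; model must be one of IBMGranite.AVAILABLE_MODELS.
granite = IBMGranite(
    api_key=os.environ["GRANITE_API_KEY"],
    model="granite-3-2-8b-instruct",
)

# With stream=True, chat() yields text chunks as they arrive.
for chunk in granite.chat("Explain token streaming in one paragraph.", stream=True):
    print(chunk, end="", flush=True)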