webscout 8.2.7-py3-none-any.whl → 8.2.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -249
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Litlogger/__init__.py +67 -0
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +23 -0
- webscout/Litlogger/core/logger.py +165 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +33 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +173 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +249 -0
- webscout/Litlogger/styles/formats.py +458 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +153 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +359 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +228 -0
- webscout/Provider/AISEARCH/genspark_search.py +350 -0
- webscout/Provider/AISEARCH/hika_search.py +198 -0
- webscout/Provider/AISEARCH/iask_search.py +436 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +324 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +673 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +370 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +285 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +36 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +488 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +319 -0
- webscout/Provider/OPENAI/e2b.py +1356 -0
- webscout/Provider/OPENAI/exaai.py +411 -0
- webscout/Provider/OPENAI/exachat.py +443 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +323 -0
- webscout/Provider/OPENAI/groq.py +361 -0
- webscout/Provider/OPENAI/heckai.py +307 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +383 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +356 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/scirachat.py +471 -0
- webscout/Provider/OPENAI/sonus.py +303 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -0
- webscout/Provider/OPENAI/uncovrAI.py +462 -0
- webscout/Provider/OPENAI/utils.py +307 -0
- webscout/Provider/OPENAI/venice.py +425 -0
- webscout/Provider/OPENAI/wisecat.py +381 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +378 -0
- webscout/Provider/OPENAI/yep.py +356 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/ImgSys/__init__.py +23 -0
- webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/__init__.py +12 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +155 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/piclumen/__init__.py +23 -0
- webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
- webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +9 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +280 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +172 -0
- webscout/Provider/ai4chat.py +149 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/scout/README.md +402 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +140 -0
- webscout/scout/core/scout.py +568 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +460 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +262 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
- webscout-8.2.8.dist-info/RECORD +334 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
webscout/Provider/Hunyuan.py
@@ -0,0 +1,283 @@
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+import json
+import os
+from typing import Any, Dict, Optional, Generator, Union
+import time
+import uuid
+import re
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class Hunyuan(Provider):
+    """
+    A class to interact with the Tencent Hunyuan API with LitAgent user-agent.
+    """
+
+    AVAILABLE_MODELS = [
+        "hunyuan-t1-latest",
+        # Add more models as they become available
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048, # Note: max_tokens is not used by this API
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "hunyuan-t1-latest",
+        browser: str = "chrome", # Note: browser fingerprinting might be less effective with impersonate
+        api_key: str = None,
+        system_prompt: str = "You are a helpful assistant.",
+    ):
+
+        """Initializes the Hunyuan API client."""
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.url = "https://llm.hunyuan.tencent.com/aide/api/v2/triton_image/demo_text_chat/"
+
+        # Initialize LitAgent (keep if needed for other headers or logic)
+        self.agent = LitAgent()
+        # Fingerprint generation might be less relevant with impersonate
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Use the fingerprint for headers (keep relevant ones)
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Language": self.fingerprint["accept_language"], # Keep Accept-Language
+            "Content-Type": "application/json",
+            "DNT": "1", # Keep DNT
+            "Origin": "https://llm.hunyuan.tencent.com", # Keep Origin
+            "Referer": "https://llm.hunyuan.tencent.com/", # Keep Referer
+            "Sec-Fetch-Dest": "empty", # Keep Sec-Fetch-*
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Sec-GPC": "1", # Keep Sec-GPC
+        }
+
+        # Add authorization if API key is provided
+        if api_key:
+            self.headers["Authorization"] = f"Bearer {api_key}"
+        else:
+            # Default test key (may not work long-term)
+            self.headers["Authorization"] = "Bearer 7auGXNATFSKl7dF"
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
+        self.system_message = system_prompt
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint (only relevant ones)
+        self.headers.update({
+            "Accept-Language": self.fingerprint["accept_language"],
+        })
+
+        # Update session headers
+        self.session.headers.update(self.headers) # Update only relevant headers
+
+        return self.fingerprint
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False, # API supports streaming
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        # Generate a unique query ID for each request
+        query_id = ''.join(re.findall(r'[a-z0-9]', str(uuid.uuid4())[:18]))
+
+
+        # Payload construction
+        payload = {
+            "stream": True, # API seems to require stream=True based on response format
+            "model": self.model,
+            "query_id": query_id,
+            "messages": [
+                {"role": "system", "content": self.system_message},
+                {"role": "user", "content": "Always response in English\n\n" + conversation_prompt},
+            ],
+            "stream_moderation": True,
+            "enable_enhancement": False
+        }
+
+        def for_stream():
+            streaming_text = "" # Initialize outside try block
+            try:
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.url,
+                    data=json.dumps(payload),
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110" # Use a common impersonation profile
+                )
+                response.raise_for_status() # Check for HTTP errors
+
+                # Iterate over bytes and decode manually
+                for line_bytes in response.iter_lines():
+                    if line_bytes:
+                        try:
+                            line = line_bytes.decode('utf-8').strip()
+                            if line.startswith("data: "):
+                                json_str = line[6:]
+                                if json_str == "[DONE]":
+                                    break
+                                json_data = json.loads(json_str)
+                                if 'choices' in json_data:
+                                    choice = json_data['choices'][0]
+                                    if 'delta' in choice and 'content' in choice['delta']:
+                                        content = choice['delta']['content']
+                                        if content: # Ensure content is not None or empty
+                                            streaming_text += content
+                                            resp = dict(text=content)
+                                            # Yield dict or raw string chunk
+                                            yield resp if not raw else content
+                        except (json.JSONDecodeError, UnicodeDecodeError):
+                            continue # Ignore lines that are not valid JSON or cannot be decoded
+
+                # Update history after stream finishes
+                self.last_response = {"text": streaming_text}
+                self.conversation.update_chat_history(prompt, streaming_text)
+
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {err_text}") from e
+
+
+        def for_non_stream():
+            # Aggregate the stream using the updated for_stream logic
+            full_text = ""
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        full_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        full_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not full_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return full_text if raw else self.last_response
+
+
+        # Since the API endpoint suggests streaming, always call the stream generator.
+        # The non-stream wrapper will handle aggregation if stream=False.
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False, # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally
+            )
+            return self.get_message(response_data) # get_message expects dict
+
+        return for_stream_chat() if stream else for_non_stream_chat()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+if __name__ == "__main__":
+    # Ensure curl_cffi is installed
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in Hunyuan.AVAILABLE_MODELS:
+        try:
+            test_ai = Hunyuan(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word", stream=True)
+            response_text = ""
+            for chunk in response:
+                response_text += chunk
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Clean and truncate response
+                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
+                display_text = clean_text[:50] + "..." if len(clean_text) > 50 else clean_text
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"\r{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
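
For reference, a minimal usage sketch of the Hunyuan provider added above. This is an illustration, not part of the diff: it assumes the class is importable as webscout.Provider.Hunyuan.Hunyuan and that the bundled default test token is still accepted by the Tencent endpoint.

from webscout.Provider.Hunyuan import Hunyuan

ai = Hunyuan(model="hunyuan-t1-latest", timeout=60)
# Non-streaming: chat() aggregates the SSE stream and returns one string
print(ai.chat("Summarize HTTP keep-alive in one sentence"))
# Streaming: chat(stream=True) yields decoded text chunks as they arrive
for chunk in ai.chat("Count from 1 to 5", stream=True):
    print(chunk, end="", flush=True)
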
webscout/Provider/Jadve.py
@@ -0,0 +1,291 @@
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+import json
+import re
+from typing import Union, Any, Dict, Optional, Generator
+
+from webscout.AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream # Import sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class JadveOpenAI(Provider):
+    """
+    A class to interact with the OpenAI API through jadve.com using the streaming endpoint.
+    """
+
+    AVAILABLE_MODELS = ["gpt-4o-mini"]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gpt-4o-mini",
+        system_prompt: str = "You are a helpful AI assistant." # Note: system_prompt is not used by this API
+    ):
+        """
+        Initializes the JadveOpenAI client.
+
+        Args:
+            is_conversation (bool, optional): Enable conversational mode. Defaults to True.
+            max_tokens (int, optional): Maximum tokens for generation. Defaults to 600.
+            timeout (int, optional): HTTP request timeout in seconds. Defaults to 30.
+            intro (str, optional): Introductory prompt text. Defaults to None.
+            filepath (str, optional): Path to conversation history file. Defaults to None.
+            update_file (bool, optional): Whether to update the conversation history file. Defaults to True.
+            proxies (dict, optional): Proxies for HTTP requests. Defaults to {}.
+            history_offset (int, optional): Limit for conversation history. Defaults to 10250.
+            act (str|int, optional): Act key for AwesomePrompts. Defaults to None.
+            model (str, optional): AI model to be used. Defaults to "gpt-4o-mini".
+            system_prompt (str, optional): System prompt text. Defaults to "You are a helpful AI assistant."
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://openai.jadve.com/stream"
+        self.stream_chunk_size = 64
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+
+        # Headers for API requests
+        self.headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": "https://jadve.com",
+            "priority": "u=1, i", # Keep priority header if needed
+            "referer": "https://jadve.com/",
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-site",
+            "x-authorization": "Bearer" # Keep custom headers
+        }
+
+        # Update curl_cffi session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies # Assign proxies directly
+
+        self.__available_optimizers = (
+            method for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    @staticmethod
+    def _jadve_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from the Jadve stream format '0:"..."'."""
+        if isinstance(chunk, str):
+            match = re.search(r'0:"(.*?)"(?=,|$)', chunk) # Look for 0:"...", possibly followed by comma or end of string
+            if match:
+                # Decode potential unicode escapes like \u00e9 and handle escaped quotes/backslashes
+                content = match.group(1).encode().decode('unicode_escape')
+                return content.replace('\\\\', '\\').replace('\\"', '"')
+        return None
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False, # API supports streaming
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[dict, Generator[dict, None, None]]:
+        """
+        Chat with AI.
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Return raw content chunks. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Flag for conversational optimization. Defaults to False.
+        Returns:
+            dict or generator: A dictionary with the generated text or a generator yielding text chunks.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(conversation_prompt if conversationally else prompt)
+            else:
+                raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": [{"type": "text", "text": conversation_prompt}]}
+            ],
+            "model": self.model,
+            "botId": "",
+            "chatId": "",
+            "stream": True, # API endpoint suggests streaming is default/required
+            "temperature": 0.7,
+            "returnTokensUsage": True,
+            "useTools": False
+        }
+
+        def for_stream():
+            full_response_text = "" # Initialize outside try block
+            try:
+                # Use curl_cffi session post with impersonate
+                response = self.session.post(
+                    self.api_endpoint,
+                    # headers are set on the session
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    # proxies are set on the session
+                    impersonate="chrome110" # Use a common impersonation profile
+                )
+                response.raise_for_status() # Check for HTTP errors
+
+                # Use sanitize_stream
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None), # Pass byte iterator
+                    intro_value=None, # No simple prefix
+                    to_json=False, # Content is text after extraction
+                    content_extractor=self._jadve_extractor, # Use the specific extractor
+                    # end_marker="e:", # Add if 'e:' reliably marks the end
+                    yield_raw_on_error=True
+                )
+
+                for content_chunk in processed_stream:
+                    # content_chunk is the string extracted by _jadve_extractor
+                    if content_chunk and isinstance(content_chunk, str):
+                        full_response_text += content_chunk
+                        resp = {"text": content_chunk}
+                        yield resp if not raw else content_chunk
+
+                # Update history after stream finishes
+                self.last_response = {"text": full_response_text}
+                self.conversation.update_chat_history(prompt, full_response_text)
+
+            except CurlError as e: # Catch CurlError
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e: # Catch other potential exceptions (like HTTPError)
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Failed to generate response ({type(e).__name__}): {e} - {err_text}") from e
+
+
+        def for_non_stream():
+            # Aggregate the stream using the updated for_stream logic
+            collected_text = ""
+            try:
+                # Ensure raw=False so for_stream yields dicts
+                for chunk_data in for_stream():
+                    if isinstance(chunk_data, dict) and "text" in chunk_data:
+                        collected_text += chunk_data["text"]
+                    # Handle raw string case if raw=True was passed
+                    elif raw and isinstance(chunk_data, str):
+                        collected_text += chunk_data
+            except Exception as e:
+                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
+                if not collected_text:
+                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
+
+            # last_response and history are updated within for_stream
+            # Return the final aggregated response dict or raw string
+            return collected_text if raw else self.last_response
+
+
+        # Since the API endpoint suggests streaming, always call the stream generator.
+        # The non-stream wrapper will handle aggregation if stream=False.
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        """
+        Generate a chat response (string).
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Flag for conversational optimization. Defaults to False.
+        Returns:
+            str or generator: Generated response string or generator yielding response chunks.
+        """
+        def for_stream_chat():
+            # ask() yields dicts or strings when streaming
+            gen = self.ask(
+                prompt, stream=True, raw=False, # Ensure ask yields dicts
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict) # get_message expects dict
+
+        def for_non_stream_chat():
+            # ask() returns dict or str when not streaming
+            response_data = self.ask(
+                prompt, stream=False, raw=False, # Ensure ask returns dict
+                optimizer=optimizer, conversationally=conversationally
+            )
+            return self.get_message(response_data) # get_message expects dict
+
+        return for_stream_chat() if stream else for_non_stream_chat()
+
+    def get_message(self, response: dict) -> str:
+        """
+        Retrieves message from the response.
+
+        Args:
+            response (dict): Response from the ask() method.
+        Returns:
+            str: Extracted text.
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        # Extractor handles formatting
+        return response.get("text", "")
+
+if __name__ == "__main__":
+    # Ensure curl_cffi is installed
+    print("-" * 80)
+    print(f"{'Model':<50} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in JadveOpenAI.AVAILABLE_MODELS:
+        try:
+            test_ai = JadveOpenAI(model=model, timeout=60)
+            response = test_ai.chat("Say 'Hello' in one word")
+            response_text = response
+
+            if response_text and len(response_text.strip()) > 0:
+                status = "✓"
+                # Truncate response if too long
+                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<50} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<50} {'✗':<10} {str(e)}")
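
Likewise, a minimal usage sketch for the JadveOpenAI provider above, shown for illustration only; it assumes the module path webscout/Provider/Jadve.py is importable as written below and that the unauthenticated "Bearer" x-authorization header is still accepted by openai.jadve.com.

from webscout.Provider.Jadve import JadveOpenAI

ai = JadveOpenAI(model="gpt-4o-mini", timeout=60)
# stream=False (default): the stream is aggregated and chat() returns the full text
print(ai.chat("Say 'Hello' in one word"))
# stream=True: chunks are extracted from the '0:"..."' wire format by _jadve_extractor
for chunk in ai.chat("List three colors", stream=True):
    print(chunk, end="", flush=True)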