webscout 8.2.7__py3-none-any.whl → 8.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -249
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Litlogger/__init__.py +67 -0
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +23 -0
- webscout/Litlogger/core/logger.py +165 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +33 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +173 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +249 -0
- webscout/Litlogger/styles/formats.py +458 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +153 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +359 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +228 -0
- webscout/Provider/AISEARCH/genspark_search.py +350 -0
- webscout/Provider/AISEARCH/hika_search.py +198 -0
- webscout/Provider/AISEARCH/iask_search.py +436 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +324 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +673 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +370 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +285 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +36 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +488 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +319 -0
- webscout/Provider/OPENAI/e2b.py +1356 -0
- webscout/Provider/OPENAI/exaai.py +411 -0
- webscout/Provider/OPENAI/exachat.py +443 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +323 -0
- webscout/Provider/OPENAI/groq.py +361 -0
- webscout/Provider/OPENAI/heckai.py +307 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +383 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +356 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/scirachat.py +471 -0
- webscout/Provider/OPENAI/sonus.py +303 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -0
- webscout/Provider/OPENAI/uncovrAI.py +462 -0
- webscout/Provider/OPENAI/utils.py +307 -0
- webscout/Provider/OPENAI/venice.py +425 -0
- webscout/Provider/OPENAI/wisecat.py +381 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +378 -0
- webscout/Provider/OPENAI/yep.py +356 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/ImgSys/__init__.py +23 -0
- webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/__init__.py +12 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +155 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/piclumen/__init__.py +23 -0
- webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
- webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +9 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +280 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +172 -0
- webscout/Provider/ai4chat.py +149 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/scout/README.md +402 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +140 -0
- webscout/scout/core/scout.py +568 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +460 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +262 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
- webscout-8.2.8.dist-info/RECORD +334 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,281 @@
|
|
|
1
|
+
import requests
|
|
2
|
+
import json
|
|
3
|
+
import re
|
|
4
|
+
from typing import Dict, Optional, Generator, Union, Any
|
|
5
|
+
|
|
6
|
+
from webscout.AIbase import AISearch
|
|
7
|
+
from webscout import exceptions
|
|
8
|
+
from webscout.litagent import LitAgent
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class Response:
    """Lightweight wrapper around a webpilotai response string.

    Instances stringify to their underlying text, so they can be printed
    or concatenated exactly like a plain ``str``.

    Attributes:
        text (str): The text content of the response.

    Example:
        >>> response = Response("Hello, world!")
        >>> print(response)
        Hello, world!
        >>> str(response)
        'Hello, world!'
    """

    def __init__(self, text: str) -> None:
        # Store the response body verbatim.
        self.text = text

    def __str__(self) -> str:
        return self.text

    def __repr__(self) -> str:
        # Mirror __str__ on purpose: REPL output shows the raw text,
        # matching how the rest of the module treats Response objects.
        return self.text
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class webpilotai(AISearch):
    """A client for the webpilotai (WebPilot) AI search API.

    webpilotai provides a web-based comprehensive search interface that
    returns AI-generated responses. It supports both streaming and
    non-streaming modes.

    Basic Usage:
        >>> from webscout import webpilotai
        >>> ai = webpilotai()
        >>> # Non-streaming example
        >>> response = ai.search("What is Python?")
        >>> print(response)
        Python is a high-level programming language...

        >>> # Streaming example
        >>> for chunk in ai.search("Tell me about AI", stream=True):
        ...     print(chunk, end="", flush=True)
        Artificial Intelligence is...

        >>> # Raw response format
        >>> for chunk in ai.search("Hello", stream=True, raw=True):
        ...     print(chunk)
        {'text': 'Hello'}
        {'text': ' there!'}

    Args:
        timeout (int, optional): Request timeout in seconds. Defaults to 90.
        proxies (dict, optional): Proxy configuration for requests. Defaults to None.
    """

    def __init__(
        self,
        timeout: int = 90,
        proxies: Optional[dict] = None,
    ):
        """Initialize the webpilotai API client.

        Args:
            timeout (int, optional): Request timeout in seconds. Defaults to 90.
            proxies (dict, optional): Proxy configuration for requests. Defaults to None.

        Example:
            >>> ai = webpilotai(timeout=120)  # Longer timeout
            >>> ai = webpilotai(proxies={'http': 'http://proxy.com:8080'})  # With proxy
        """
        self.session = requests.Session()
        self.api_endpoint = "https://api.webpilotai.com/rupee/v1/search"
        self.timeout = timeout
        self.last_response = {}

        # 'Bearer null' is what the public WebPilot web client sends; the
        # API expects it for unauthenticated access.
        self.headers = {
            'Accept': 'application/json, text/plain, */*, text/event-stream',
            'Content-Type': 'application/json;charset=UTF-8',
            'Authorization': 'Bearer null',
            'Origin': 'https://www.webpilot.ai',
            'Referer': 'https://www.webpilot.ai/',
            'User-Agent': LitAgent().random(),
        }

        self.session.headers.update(self.headers)
        self.proxies = proxies

    def search(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
    ) -> Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
        """Search using the webpilotai API and get AI-generated responses.

        Args:
            prompt (str): The search query or prompt to send to the API.
            stream (bool, optional): If True, yields response chunks as they arrive.
                If False, returns the complete response. Defaults to False.
            raw (bool, optional): If True, produces raw dictionaries with a 'text'
                key. If False, produces Response objects. Defaults to False.

        Returns:
            Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
                - stream=True: a generator of chunks (dicts when raw, else Response)
                - stream=False, raw=False: the complete formatted Response
                - stream=False, raw=True: a list of raw chunk dicts

        Raises:
            APIConnectionError: If the API request fails or times out.

        Examples:
            >>> ai = webpilotai()
            >>> print(ai.search("What is Python?"))
            Python is a programming language...
        """
        payload = {
            "q": prompt,
            "threadId": ""  # Empty for a new search thread
        }

        def for_stream():
            # Accumulates the full text so a trailing event that repeats the
            # whole answer can be de-duplicated (see flush logic below).
            full_response_content = ""
            current_event_name = None
            current_data_buffer = []

            try:
                with self.session.post(
                    self.api_endpoint,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    proxies=self.proxies
                ) as response:
                    if not response.ok:
                        raise exceptions.APIConnectionError(
                            f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                        )

                    # Parse the Server-Sent-Events stream line by line.
                    for line in response.iter_lines(decode_unicode=True):
                        if not line:  # A blank line terminates one SSE event.
                            if current_data_buffer:
                                full_data = "\n".join(current_data_buffer)
                                if current_event_name == "message":
                                    try:
                                        data_payload = json.loads(full_data)
                                        if data_payload.get('type') == 'data':
                                            content_chunk = data_payload.get('data', {}).get('content', "")
                                            if content_chunk:
                                                full_response_content += content_chunk
                                                if raw:
                                                    yield {"text": content_chunk}
                                                else:
                                                    yield Response(content_chunk)
                                    except Exception:
                                        # Best-effort: never abort the stream over
                                        # one malformed event payload.
                                        pass
                            # Reset for the next event.
                            current_event_name = None
                            current_data_buffer = []
                            continue

                        # Parse SSE fields.
                        if line.startswith('event:'):
                            current_event_name = line[len('event:'):].strip()
                        elif line.startswith('data:'):
                            data_part = line[len('data:'):]
                            # SSE permits a single space after the colon.
                            if data_part.startswith(' '):
                                data_part = data_part[1:]
                            current_data_buffer.append(data_part)

                    # Flush a trailing event if the stream ended without a blank line.
                    if current_data_buffer and current_event_name == "message":
                        try:
                            data_payload = json.loads("\n".join(current_data_buffer))
                            if data_payload.get('type') == 'data':
                                content_chunk = data_payload.get('data', {}).get('content', "")
                                # The final event may carry the full text again;
                                # emit only the not-yet-seen suffix.
                                if content_chunk and len(content_chunk) > len(full_response_content):
                                    delta = content_chunk[len(full_response_content):]
                                    full_response_content += delta
                                    if raw:
                                        yield {"text": delta}
                                    else:
                                        yield Response(delta)
                        except Exception:
                            pass

            except requests.exceptions.Timeout:
                raise exceptions.APIConnectionError("Request timed out")
            except requests.exceptions.RequestException as e:
                raise exceptions.APIConnectionError(f"Request failed: {e}")

        def for_non_stream():
            # BUG FIX: the previous implementation contained a ``yield`` inside
            # its raw branch, which made the whole function a generator, so
            # search(stream=False) returned an un-iterated generator instead of
            # a Response. Collect the stream eagerly instead.
            chunks = list(for_stream())
            if raw:
                # Raw non-streaming mode: hand back the collected chunk dicts.
                return chunks
            full_response = "".join(str(chunk) for chunk in chunks)
            formatted_response = self.format_response(full_response)
            self.last_response = Response(formatted_response)
            return self.last_response

        return for_stream() if stream else for_non_stream()

    @staticmethod
    def format_response(text: str) -> str:
        """Format the response text for better readability.

        Args:
            text (str): The raw response text.

        Returns:
            str: Formatted text with improved structure.
        """
        # Collapse runs of three or more newlines to a single blank line.
        clean_text = re.sub(r'\n{3,}', '\n\n', text)

        # Ensure consistent spacing around sentence/paragraph boundaries.
        clean_text = re.sub(r'([.!?])\s*\n\s*([A-Z])', r'\1\n\n\2', clean_text)

        # Strip any leftover HTML tags.
        clean_text = re.sub(r'<[^>]*>', '', clean_text)

        # Remove trailing whitespace on each line.
        clean_text = '\n'.join(line.rstrip() for line in clean_text.split('\n'))

        return clean_text.strip()
|
|
273
|
+
|
|
274
|
+
|
|
275
|
+
if __name__ == "__main__":
    # Rich's print renders streamed chunks with console styling.
    from rich import print

    client = webpilotai()
    query = input(">>> ")
    for piece in client.search(query, stream=True, raw=False):
        print(piece, end="", flush=True)
|
|
@@ -0,0 +1,316 @@
|
|
|
1
|
+
from curl_cffi import CurlError
|
|
2
|
+
from curl_cffi.requests import Session
|
|
3
|
+
import json
|
|
4
|
+
import uuid
|
|
5
|
+
import time
|
|
6
|
+
import hashlib
|
|
7
|
+
from typing import Any, Dict, Optional, Generator, Union
|
|
8
|
+
|
|
9
|
+
from webscout.AIutel import Optimizers
|
|
10
|
+
from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
|
|
11
|
+
from webscout.AIutel import AwesomePrompts
|
|
12
|
+
from webscout.AIbase import Provider, AsyncProvider
|
|
13
|
+
from webscout import exceptions
|
|
14
|
+
from webscout.litagent import LitAgent
|
|
15
|
+
|
|
16
|
+
class Aitopia(Provider):
|
|
17
|
+
"""
|
|
18
|
+
A class to interact with the Aitopia API with LitAgent user-agent.
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
AVAILABLE_MODELS = [
|
|
22
|
+
"Claude 3 Haiku",
|
|
23
|
+
"GPT-4o Mini",
|
|
24
|
+
"Gemini 1.5 Flash",
|
|
25
|
+
"Llama 3.1 70B"
|
|
26
|
+
]
|
|
27
|
+
|
|
28
|
+
def __init__(
    self,
    is_conversation: bool = True,
    max_tokens: int = 2049,
    timeout: int = 30,
    intro: str = None,
    filepath: str = None,
    update_file: bool = True,
    proxies: Optional[dict] = None,
    history_offset: int = 10250,
    act: str = None,
    model: str = "Claude 3 Haiku",
    browser: str = "chrome"
):
    """Initializes the Aitopia API client.

    Args:
        is_conversation: Keep multi-turn conversation history.
        max_tokens: Token budget recorded for the conversation.
        timeout: Per-request timeout in seconds.
        intro: Optional intro/system prompt for the conversation.
        filepath: Optional path used to persist conversation history.
        update_file: Whether history updates are written back to ``filepath``.
        proxies: Optional proxy mapping for the HTTP session. Defaults to None
            (fix: previously a mutable ``{}`` default shared across instances
            and handed directly to the session).
        history_offset: Maximum history length kept in the generated prompt.
        act: Optional AwesomePrompts persona key used as the intro.
        model: Target model name; must be one of ``AVAILABLE_MODELS``.
        browser: Browser name used to generate a consistent fingerprint.

    Raises:
        ValueError: If ``model`` is not in ``AVAILABLE_MODELS``.
    """
    if model not in self.AVAILABLE_MODELS:
        raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

    self.url = "https://extensions.aitopia.ai/ai/send"

    # LitAgent supplies an internally consistent browser identity
    # (user-agent, accept-language, sec-ch-ua, platform).
    self.agent = LitAgent()
    self.fingerprint = self.agent.generate_fingerprint(browser)

    # Headers derived from the fingerprint; the sec-ch-ua fallback mirrors
    # a current Chromium-based browser when the fingerprint omits it.
    self.headers = {
        "accept": "text/plain",
        "accept-language": self.fingerprint["accept_language"],
        "content-type": "text/plain;charset=UTF-8",
        "dnt": "1",
        "origin": "https://chat.aitopia.ai",
        "priority": "u=1, i",
        "referer": "https://chat.aitopia.ai/",
        "sec-ch-ua": self.fingerprint["sec_ch_ua"] or '"Chromium";v="134", "Not:A-Brand";v="24", "Microsoft Edge";v="134"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "user-agent": self.fingerprint["user_agent"]
    }

    self.session = Session()  # curl_cffi Session (TLS-fingerprint aware)
    self.session.headers.update(self.headers)
    # BUG FIX: avoid the shared-mutable-default pitfall — never attach the
    # (formerly shared) default dict to the session; use a fresh one.
    self.session.proxies = proxies if proxies is not None else {}

    self.is_conversation = is_conversation
    self.max_tokens_to_sample = max_tokens
    self.timeout = timeout
    self.last_response = {}
    self.model = model

    # BUG FIX: materialize as a tuple. The previous generator expression was
    # exhausted by the first membership test in ask(), so any later
    # ``optimizer in self.__available_optimizers`` check silently failed.
    self.__available_optimizers = tuple(
        method
        for method in dir(Optimizers)
        if callable(getattr(Optimizers, method)) and not method.startswith("__")
    )
    Conversation.intro = (
        AwesomePrompts().get_act(
            act, raise_not_found=True, default=None, case_insensitive=True
        )
        if act
        else intro or Conversation.intro
    )

    self.conversation = Conversation(
        is_conversation, self.max_tokens_to_sample, filepath, update_file
    )
    self.conversation.history_offset = history_offset
|
|
98
|
+
|
|
99
|
+
def refresh_identity(self, browser: str = None):
    """
    Regenerates the browser fingerprint and syncs the derived headers.

    Args:
        browser: Specific browser to use for the new fingerprint; defaults
            to the browser type recorded in the current fingerprint.

    Returns:
        The freshly generated fingerprint dict.
    """
    chosen = browser or self.fingerprint.get("browser_type", "chrome")
    self.fingerprint = self.agent.generate_fingerprint(chosen)

    # Refresh only the fingerprint-derived header fields; keep the previous
    # sec-ch-ua when the new fingerprint does not provide one.
    self.headers.update({
        "accept-language": self.fingerprint["accept_language"],
        "sec-ch-ua": self.fingerprint["sec_ch_ua"] or self.headers["sec-ch-ua"],
        "sec-ch-ua-platform": f'"{self.fingerprint["platform"]}"',
        "user-agent": self.fingerprint["user_agent"],
    })

    # Push the full header set onto the live session in one pass.
    self.session.headers.update(self.headers)

    return self.fingerprint
|
|
122
|
+
|
|
123
|
+
def generate_uuid_search(self):
    """Return a fresh UUID4 as a 32-character lowercase hex string.

    ``UUID.hex`` is exactly the canonical string form with the dashes
    removed, so this matches str(uuid4()).replace('-', '').
    """
    return uuid.uuid4().hex
|
|
127
|
+
|
|
128
|
+
def generate_hopekey(self):
    """Return the md5 hex digest of a UUID+timestamp nonce.

    Used purely as a per-request opaque token, not for security.
    """
    nonce = f"{uuid.uuid4()}{time.time()}"
    return hashlib.md5(nonce.encode()).hexdigest()
|
|
132
|
+
|
|
133
|
+
@staticmethod
def _aitopia_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
    """Pull the text fragment out of a single Aitopia stream chunk.

    Returns the extracted text, or None when the chunk is not a dict or
    matches neither known payload shape.
    """
    if not isinstance(chunk, dict):
        # Non-dict chunks (e.g. plain strings) carry no structured content.
        return None
    # Claude-style payload: {"delta": {"text": ...}}
    if "delta" in chunk and "text" in chunk["delta"]:
        return chunk["delta"]["text"]
    # GPT-style payload: {"choices": {"0": {"delta": {"content": ...}}}}
    # NOTE(review): choices is keyed by the string "0" here, not a list —
    # presumably matching the Aitopia wire format; verify against the API.
    if "choices" in chunk and "0" in chunk["choices"]:
        return chunk["choices"]["0"]["delta"].get("content")
    return None
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
def ask(
    self,
    prompt: str,
    stream: bool = False,
    raw: bool = False,
    optimizer: str = None,
    conversationally: bool = False,
) -> Union[Dict[str, Any], Generator]:
    """Send *prompt* to the Aitopia endpoint and return the model's reply.

    Args:
        prompt: User message to send.
        stream: If True, return a generator yielding chunks as they arrive;
            otherwise return a single dict after the full response is parsed.
        raw: Stream mode only — yield bare text strings instead of
            {"text": ...} dicts. NOTE(review): ignored in the non-stream
            path; non-stream always returns a dict.
        optimizer: Name of an Optimizers method used to rewrite the prompt.
        conversationally: Apply the optimizer to the full conversation
            prompt instead of the bare prompt.

    Returns:
        A {"text": str} dict (non-stream) or a generator of such dicts /
        raw strings (stream).

    Raises:
        Exception: If *optimizer* is not one of the available optimizers.
        exceptions.FailedToGenerateResponseError: On transport failure or
            when no content can be parsed from the response.
    """
    conversation_prompt = self.conversation.gen_complete_prompt(prompt)
    if optimizer:
        if optimizer in self.__available_optimizers:
            conversation_prompt = getattr(Optimizers, optimizer)(
                conversation_prompt if conversationally else prompt
            )
        else:
            raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

    # Generate hopekey and update headers (fresh per-request key)
    hopekey = self.generate_hopekey()
    self.headers["hopekey"] = hopekey

    # Default history if none provided
    history = [
        {
            "item": "Hello, how can I help you today?",
            "role": "assistant",
            # "model": "GPT-4o Mini"
        }
    ]

    # Generate current timestamp (milliseconds) for chat_id
    current_time = int(time.time() * 1000)

    # Request payload
    payload = {
        "history": history,
        "text": conversation_prompt,
        "model": self.model,
        "stream": stream,
        "uuid_search": self.generate_uuid_search(),
        "mode": "ai_chat",
        "prompt_mode": False,
        "extra_key": "__all",
        "extra_data": {"prompt_mode": False},
        "chat_id": current_time,
        "language_detail": {
            "lang_code": "en",
            "name": "English",
            "title": "English"
        },
        "is_continue": False,
        "lang_code": "en"
    }

    def for_stream():
        # Streamed variant: yields chunks and records the accumulated text
        # in conversation history once the stream ends (see finally below).
        streaming_text = ""  # Initialize outside try block
        try:
            response = self.session.post(
                self.url, headers=self.headers, json=payload, stream=True, timeout=self.timeout,
                impersonate="chrome120"  # Add impersonate
            )
            response.raise_for_status()

            # Use sanitize_stream to strip SSE framing and decode each chunk
            processed_stream = sanitize_stream(
                data=response.iter_content(chunk_size=None),  # Pass byte iterator
                intro_value="data:",
                to_json=True,  # Stream sends JSON
                skip_markers=["[DONE]"],
                content_extractor=self._aitopia_extractor,  # Use the specific extractor
                yield_raw_on_error=False  # Skip non-JSON lines or lines where extractor fails
            )

            for content_chunk in processed_stream:
                # content_chunk is the string extracted by _aitopia_extractor
                if content_chunk and isinstance(content_chunk, str):
                    streaming_text += content_chunk
                    resp = dict(text=content_chunk)
                    yield resp if not raw else content_chunk

        except CurlError as e:
            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
        finally:
            # Update history after stream finishes or fails — partial
            # output is still recorded if any chunks arrived.
            if streaming_text:
                self.last_response = {"text": streaming_text}
                self.conversation.update_chat_history(prompt, streaming_text)

    def for_non_stream():
        # Non-streamed variant: one POST, parse the whole body as JSON.
        try:
            response = self.session.post(
                self.url, headers=self.headers, json=payload, timeout=self.timeout,
                impersonate="chrome120"  # Add impersonate
            )
            response.raise_for_status()

            response_text_raw = response.text  # Get raw text

            # Use sanitize_stream to parse the non-streaming JSON response
            # Assuming non-stream uses the GPT format based on original code
            processed_stream = sanitize_stream(
                data=response_text_raw,
                to_json=True,  # Parse the whole text as JSON
                intro_value=None,
                content_extractor=lambda chunk: chunk.get("choices", [{}])[0].get("message", {}).get("content") if isinstance(chunk, dict) else None,
                yield_raw_on_error=False
            )
            # Extract the single result
            content = next(processed_stream, None)
            content = content if isinstance(content, str) else ""  # Ensure it's a string

            if content:  # Check if content was successfully extracted
                self.last_response = {"text": content}
                self.conversation.update_chat_history(prompt, content)
                return {"text": content}
            else:
                raise exceptions.FailedToGenerateResponseError("No response content found or failed to parse")
        except CurlError as e:
            raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

    return for_stream() if stream else for_non_stream()
|
|
272
|
+
|
|
273
|
+
def chat(
    self,
    prompt: str,
    stream: bool = False,
    optimizer: str = None,
    conversationally: bool = False,
) -> Union[str, Generator[str, None, None]]:
    """Chat with the model and receive plain text.

    Thin wrapper over ask() that unwraps each response dict to its text.

    Args:
        prompt: User message to send.
        stream: If True, return a generator of text chunks; otherwise a
            single string.
        optimizer: Optional prompt-optimizer name, forwarded to ask().
        conversationally: Forwarded to ask().
    """
    if stream:
        def _text_chunks():
            for part in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(part)
        return _text_chunks()
    result = self.ask(
        prompt, False, optimizer=optimizer, conversationally=conversationally
    )
    return self.get_message(result)
|
|
288
|
+
|
|
289
|
+
def get_message(self, response: dict) -> str:
    """Extract the text payload from an ask() response dict.

    Args:
        response: Response mapping produced by ask(); must contain "text".

    Returns:
        The response text.

    Raises:
        TypeError: If *response* is not a dict. (The previous `assert` was
            unreliable for validation: it is stripped under `python -O`.)
    """
    if not isinstance(response, dict):
        raise TypeError("Response should be of dict data-type only")
    return response["text"]
|
|
292
|
+
|
|
293
|
+
if __name__ == "__main__":
    # Smoke-test every available model and print a one-line result per model.
    divider = "-" * 80
    print(divider)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print(divider)

    for model in Aitopia.AVAILABLE_MODELS:
        try:
            client = Aitopia(model=model, timeout=60)
            pieces = []
            for chunk in client.chat("Say 'Hello' in one word", stream=True):
                pieces.append(chunk)
            response_text = "".join(pieces)

            if response_text.strip():
                status = "✓"
                # Drop undecodable bytes, then truncate long replies for display
                clean_text = response_text.strip().encode('utf-8', errors='ignore').decode('utf-8')
                if len(clean_text) > 50:
                    display_text = clean_text[:50] + "..."
                else:
                    display_text = clean_text
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"\r{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"\r{model:<50} {'✗':<10} {str(e)}")
|