webscout 8.2.7__py3-none-any.whl → 8.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -249
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Litlogger/__init__.py +67 -0
- webscout/Litlogger/core/__init__.py +6 -0
- webscout/Litlogger/core/level.py +23 -0
- webscout/Litlogger/core/logger.py +165 -0
- webscout/Litlogger/handlers/__init__.py +12 -0
- webscout/Litlogger/handlers/console.py +33 -0
- webscout/Litlogger/handlers/file.py +143 -0
- webscout/Litlogger/handlers/network.py +173 -0
- webscout/Litlogger/styles/__init__.py +7 -0
- webscout/Litlogger/styles/colors.py +249 -0
- webscout/Litlogger/styles/formats.py +458 -0
- webscout/Litlogger/styles/text.py +87 -0
- webscout/Litlogger/utils/__init__.py +6 -0
- webscout/Litlogger/utils/detectors.py +153 -0
- webscout/Litlogger/utils/formatters.py +200 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +359 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +228 -0
- webscout/Provider/AISEARCH/genspark_search.py +350 -0
- webscout/Provider/AISEARCH/hika_search.py +198 -0
- webscout/Provider/AISEARCH/iask_search.py +436 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +324 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +673 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +370 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +285 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +36 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +488 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +319 -0
- webscout/Provider/OPENAI/e2b.py +1356 -0
- webscout/Provider/OPENAI/exaai.py +411 -0
- webscout/Provider/OPENAI/exachat.py +443 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +323 -0
- webscout/Provider/OPENAI/groq.py +361 -0
- webscout/Provider/OPENAI/heckai.py +307 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +383 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +356 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/scirachat.py +471 -0
- webscout/Provider/OPENAI/sonus.py +303 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -0
- webscout/Provider/OPENAI/uncovrAI.py +462 -0
- webscout/Provider/OPENAI/utils.py +307 -0
- webscout/Provider/OPENAI/venice.py +425 -0
- webscout/Provider/OPENAI/wisecat.py +381 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +378 -0
- webscout/Provider/OPENAI/yep.py +356 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/AiForce/__init__.py +22 -0
- webscout/Provider/TTI/AiForce/async_aiforce.py +224 -0
- webscout/Provider/TTI/AiForce/sync_aiforce.py +245 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -0
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +181 -0
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +180 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/ImgSys/__init__.py +23 -0
- webscout/Provider/TTI/ImgSys/async_imgsys.py +202 -0
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +195 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/Nexra/__init__.py +22 -0
- webscout/Provider/TTI/Nexra/async_nexra.py +286 -0
- webscout/Provider/TTI/Nexra/sync_nexra.py +258 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/PollinationsAI/__init__.py +23 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +311 -0
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +265 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/__init__.py +12 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/artbit/__init__.py +22 -0
- webscout/Provider/TTI/artbit/async_artbit.py +155 -0
- webscout/Provider/TTI/artbit/sync_artbit.py +148 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +261 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +252 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/huggingface/__init__.py +22 -0
- webscout/Provider/TTI/huggingface/async_huggingface.py +199 -0
- webscout/Provider/TTI/huggingface/sync_huggingface.py +195 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/piclumen/__init__.py +23 -0
- webscout/Provider/TTI/piclumen/async_piclumen.py +268 -0
- webscout/Provider/TTI/piclumen/sync_piclumen.py +233 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/pixelmuse/__init__.py +4 -0
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +249 -0
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +182 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTI/talkai/__init__.py +4 -0
- webscout/Provider/TTI/talkai/async_talkai.py +229 -0
- webscout/Provider/TTI/talkai/sync_talkai.py +207 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +9 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +280 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +172 -0
- webscout/Provider/ai4chat.py +149 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/scout/README.md +402 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +140 -0
- webscout/scout/core/scout.py +568 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +460 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +262 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/METADATA +115 -60
- webscout-8.2.8.dist-info/RECORD +334 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.8.dist-info}/top_level.txt +0 -0
webscout/Litlogger/utils/formatters.py
@@ -0,0 +1,200 @@
+import json
+import re
+import traceback
+from datetime import datetime
+from typing import Any, Dict, Optional, Union
+
+class MessageFormatter:
+    """Utility class for formatting log messages."""
+
+    @staticmethod
+    def format_exception(exc_info: tuple) -> str:
+        """
+        Format exception information into a readable string.
+
+        Args:
+            exc_info: Tuple of (type, value, traceback)
+
+        Returns:
+            Formatted exception string
+        """
+        return "".join(traceback.format_exception(*exc_info))
+
+    @staticmethod
+    def format_dict(data: Dict[str, Any], indent: int = 2) -> str:
+        """
+        Format dictionary into pretty-printed string.
+
+        Args:
+            data: Dictionary to format
+            indent: Number of spaces for indentation
+
+        Returns:
+            Formatted string representation
+        """
+        return json.dumps(data, indent=indent, default=str, ensure_ascii=False)
+
+    @staticmethod
+    def format_object(obj: Any) -> str:
+        """
+        Format any object into a string representation.
+
+        Args:
+            obj: Object to format
+
+        Returns:
+            String representation of object
+        """
+        if hasattr(obj, "to_dict"):
+            return MessageFormatter.format_dict(obj.to_dict())
+        if hasattr(obj, "__dict__"):
+            return MessageFormatter.format_dict(obj.__dict__)
+        return str(obj)
+
+    @staticmethod
+    def truncate(message: str, max_length: int = 1000, suffix: str = "...") -> str:
+        """
+        Truncate message to maximum length.
+
+        Args:
+            message: Message to truncate
+            max_length: Maximum length
+            suffix: String to append when truncated
+
+        Returns:
+            Truncated message
+        """
+        if len(message) <= max_length:
+            return message
+        return message[:max_length - len(suffix)] + suffix
+
+    @staticmethod
+    def mask_sensitive(message: str, patterns: Dict[str, str]) -> str:
+        """
+        Mask sensitive information in message.
+
+        Args:
+            message: Message to process
+            patterns: Dictionary of {pattern: mask}
+
+        Returns:
+            Message with sensitive info masked
+        """
+        result = message
+        for pattern, mask in patterns.items():
+            result = re.sub(pattern, mask, result)
+        return result
+
+    @staticmethod
+    def format_context(context: Dict[str, Any]) -> str:
+        """
+        Format context dictionary into readable string.
+
+        Args:
+            context: Context dictionary
+
+        Returns:
+            Formatted context string
+        """
+        parts = []
+        for key, value in sorted(context.items()):
+            formatted_value = (
+                MessageFormatter.format_object(value)
+                if isinstance(value, (dict, list, tuple))
+                else str(value)
+            )
+            parts.append(f"{key}={formatted_value}")
+        return " ".join(parts)
+
+    @staticmethod
+    def format_metrics(metrics: Dict[str, Union[int, float]]) -> str:
+        """
+        Format performance metrics into readable string.
+
+        Args:
+            metrics: Dictionary of metric names and values
+
+        Returns:
+            Formatted metrics string
+        """
+        parts = []
+        for key, value in sorted(metrics.items()):
+            if isinstance(value, float):
+                formatted = f"{value:.3f}"
+            else:
+                formatted = str(value)
+            parts.append(f"{key}={formatted}")
+        return " ".join(parts)
+
+    @staticmethod
+    def format_timestamp(
+        timestamp: Optional[datetime] = None,
+        format: str = "%Y-%m-%d %H:%M:%S.%f"
+    ) -> str:
+        """
+        Format timestamp into string.
+
+        Args:
+            timestamp: Datetime object (uses current time if None)
+            format: strftime format string
+
+        Returns:
+            Formatted timestamp string
+        """
+        if timestamp is None:
+            timestamp = datetime.now()
+        return timestamp.strftime(format)
+
+    @classmethod
+    def format_message(
+        cls,
+        message: str,
+        context: Optional[Dict[str, Any]] = None,
+        metrics: Optional[Dict[str, Union[int, float]]] = None,
+        max_length: Optional[int] = None,
+        mask_patterns: Optional[Dict[str, str]] = None,
+        timestamp_format: Optional[str] = None
+    ) -> str:
+        """
+        Format complete log message with all options.
+
+        Args:
+            message: Base message
+            context: Optional context dictionary
+            metrics: Optional performance metrics
+            max_length: Optional maximum length
+            mask_patterns: Optional sensitive data patterns
+            timestamp_format: Optional timestamp format
+
+        Returns:
+            Formatted complete message
+        """
+        parts = []
+
+        # Add timestamp
+        if timestamp_format:
+            parts.append(cls.format_timestamp(format=timestamp_format))
+
+        # Add main message
+        parts.append(message)
+
+        # Add context if present
+        if context:
+            parts.append(cls.format_context(context))
+
+        # Add metrics if present
+        if metrics:
+            parts.append(cls.format_metrics(metrics))
+
+        # Join all parts
+        result = " ".join(parts)
+
+        # Apply masking if needed
+        if mask_patterns:
+            result = cls.mask_sensitive(result, mask_patterns)
+
+        # Apply length limit if needed
+        if max_length:
+            result = cls.truncate(result, max_length)
+
+        return result
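The MessageFormatter above assembles a single log line from a timestamp, the base message, sorted context pairs, and metrics, then applies regex masking and length truncation. A minimal usage sketch, assuming this hunk is the new webscout/Litlogger/utils/formatters.py from the file list and importing the class directly from that module:

from webscout.Litlogger.utils.formatters import MessageFormatter

# Build one line: timestamp + message + sorted context + metrics, then mask the IP.
line = MessageFormatter.format_message(
    "user login",
    context={"user": "alice", "ip": "10.0.0.7"},
    metrics={"latency": 0.1234},
    mask_patterns={r"10\.0\.0\.\d+": "<ip>"},
    timestamp_format="%Y-%m-%d %H:%M:%S",
)
print(line)  # e.g. "2025-05-01 12:00:00 user login ip=<ip> user=alice latency=0.123"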
webscout/Provider/AI21.py
@@ -0,0 +1,177 @@
+import requests
+import json
+from typing import Union, Dict, Any
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+class AI21(Provider):
+    """
+    A class to interact with the AI21 Studio API.
+    """
+
+    def __init__(
+        self,
+        api_key: str,
+        model: str = "jamba-1.5-large",
+        max_tokens: int = 1024,
+        temperature: float = 0.4,
+        top_p: float = 1,
+        is_conversation: bool = True,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful and informative AI assistant."
+    ):
+        """
+        Initializes the AI21 Studio API with given parameters.
+        """
+        self.api_key = api_key
+        self.api_endpoint = "https://api.ai21.com/studio/v1/chat/completions"
+        self.model = model
+        self.max_tokens = max_tokens
+        self.temperature = temperature
+        self.top_p = top_p
+        self.system_prompt = system_prompt
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.headers = {
+            'Accept': 'application/json, text/plain, */*',
+            'Accept-Encoding': 'gzip, deflate, br, zstd',
+            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
+            'Authorization': f"Bearer {self.api_key}",
+            'Content-Type': 'application/json',
+            'DNT': '1',
+            'Origin': 'https://studio.ai21.com',
+            'Referer': 'https://studio.ai21.com/',
+            'Sec-CH-UA': '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
+            'Sec-CH-UA-Mobile': '?0',
+            'Sec-CH-UA-Platform': '"Windows"',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-site',
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0',
+        }
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        self.session.headers.update(self.headers)
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Sends a prompt to the AI21 Studio API and returns the response.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "n": 1,
+            "max_tokens": self.max_tokens,
+            "model": self.model,
+            "stop": [],
+            "temperature": self.temperature,
+            "top_p": self.top_p,
+            "documents": [],
+        }
+
+        response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, timeout=self.timeout)
+
+        if not response.ok:
+            raise exceptions.FailedToGenerateResponseError(
+                f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+            )
+
+        resp = response.json()
+        self.last_response.update(resp)
+        self.conversation.update_chat_history(
+            prompt, self.get_message(self.last_response)
+        )
+        return self.last_response
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """
+        Generates a response from the AI21 API.
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """
+        Extracts the message from the API response.
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response['choices'][0]['message']['content']
+
+# Example usage
+if __name__ == "__main__":
+    from rich import print
+    ai = AI21(api_key="api_key")
+    response = ai.chat(input(">>> "))
+    for line in response:
+        print(line, end="", flush=True)
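The bundled example above prints the reply character by character only because iterating a plain string yields characters; chat() with stream=False already returns the full completion text. A short non-interactive sketch, assuming the class is imported from the new webscout/Provider/AI21.py module and using a placeholder API key:

from webscout.Provider.AI21 import AI21

ai = AI21(
    api_key="YOUR_AI21_API_KEY",       # placeholder; a real AI21 Studio key is required
    model="jamba-1.5-large",           # constructor default shown above
    system_prompt="Answer concisely.",
)
answer = ai.chat("Summarize what the Jamba models are.")  # returns the full text as a str
print(answer)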
webscout/Provider/AISEARCH/DeepFind.py
@@ -0,0 +1,254 @@
+from uuid import uuid4
+import requests
+import re
+from typing import Any, Dict, Generator, Optional, Union
+
+from webscout.AIbase import AISearch
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class Response:
+    """A wrapper class for DeepFind API responses.
+
+    This class automatically converts response objects to their text representation
+    when printed or converted to string.
+
+    Attributes:
+        text (str): The text content of the response
+
+    Example:
+        >>> response = Response("Hello, world!")
+        >>> print(response)
+        Hello, world!
+        >>> str(response)
+        'Hello, world!'
+    """
+    def __init__(self, text: str):
+        self.text = text
+
+    def __str__(self):
+        return self.text
+
+    def __repr__(self):
+        return self.text
+
+class DeepFind(AISearch):
+    """A class to interact with the DeepFind AI search API.
+
+    DeepFind provides a powerful search interface that returns AI-generated responses
+    based on web content. It supports both streaming and non-streaming responses.
+
+    Basic Usage:
+        >>> from webscout import DeepFind
+        >>> ai = DeepFind()
+        >>> # Non-streaming example
+        >>> response = ai.search("What is Python?")
+        >>> print(response)
+        Python is a high-level programming language...
+
+        >>> # Streaming example
+        >>> for chunk in ai.search("Tell me about AI", stream=True):
+        ...     print(chunk, end="", flush=True)
+        Artificial Intelligence is...
+
+        >>> # Raw response format
+        >>> for chunk in ai.search("Hello", stream=True, raw=True):
+        ...     print(chunk)
+        {'text': 'Hello'}
+        {'text': ' there!'}
+
+    Args:
+        timeout (int, optional): Request timeout in seconds. Defaults to 30.
+        proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+
+    Attributes:
+        api_endpoint (str): The DeepFind API endpoint URL.
+        stream_chunk_size (int): Size of chunks when streaming responses.
+        timeout (int): Request timeout in seconds.
+        headers (dict): HTTP headers used in requests.
+    """
+
+    def __init__(
+        self,
+        timeout: int = 30,
+        proxies: Optional[dict] = None,
+    ):
+        """Initialize the DeepFind API client.
+
+        Args:
+            timeout (int, optional): Request timeout in seconds. Defaults to 30.
+            proxies (dict, optional): Proxy configuration for requests. Defaults to None.
+
+        Example:
+            >>> ai = DeepFind(timeout=60)  # Longer timeout
+            >>> ai = DeepFind(proxies={'http': 'http://proxy.com:8080'})  # With proxy
+        """
+        self.session = requests.Session()
+        self.stream_chunk_size = 1024
+        self.timeout = timeout
+        self.last_response = {}
+        self.proxies = proxies
+        # Headers will be set per request, as conversationId is dynamic
+
+    def search(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+    ) -> Union[Response, Generator[Union[Dict[str, str], Response], None, None]]:
+        """Search using the DeepFind API and get AI-generated responses.
+
+        This method sends a search query to DeepFind and returns the AI-generated response.
+        It supports both streaming and non-streaming modes, as well as raw response format.
+
+        Args:
+            prompt (str): The search query or prompt to send to the API.
+            stream (bool, optional): If True, yields response chunks as they arrive.
+                If False, returns complete response. Defaults to False.
+            raw (bool, optional): If True, returns raw response dictionaries with 'text' key.
+                If False, returns Response objects that convert to text automatically.
+                Defaults to False.
+
+        Returns:
+            Union[Dict[str, Any], Generator[str, None, None]]:
+                - If stream=False: Returns complete response
+                - If stream=True: Yields response chunks as they arrive
+
+        Raises:
+            APIConnectionError: If the API request fails
+
+        Examples:
+            Basic search:
+            >>> ai = DeepFind()
+            >>> response = ai.search("What is Python?")
+            >>> print(response)
+            Python is a programming language...
+
+            Streaming response:
+            >>> for chunk in ai.search("Tell me about AI", stream=True):
+            ...     print(chunk, end="")
+            Artificial Intelligence...
+
+            Raw response format:
+            >>> for chunk in ai.search("Hello", stream=True, raw=True):
+            ...     print(chunk)
+            {'text': 'Hello'}
+            {'text': ' there!'}
+
+            Error handling:
+            >>> try:
+            ...     response = ai.search("My question")
+            ... except exceptions.APIConnectionError as e:
+            ...     print(f"API error: {e}")
+        """
+        conversation_id = uuid4().hex
+        message_id = uuid4().hex
+        url = f"https://www.deepfind.co/s/{conversation_id}"
+        payload = [
+            {
+                "id": conversation_id,
+                "messages": [
+                    {
+                        "role": "user",
+                        "conversationId": conversation_id,
+                        "messageId": message_id,
+                        "content": prompt,
+                    }
+                ],
+            },
+            conversation_id,
+            message_id,
+        ]
+        # Update headers for this conversation
+        headers = {
+            "Accept": "text/x-component",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": "en-US,en;q=0.9,en-IN;q=0.8",
+            "Content-Type": "text/plain;charset=UTF-8",
+            "DNT": "1",
+            "Next-Action": "f354668f23f516a46ad0abe4dedb84b19068bb54",
+            "Next-Router-State-Tree": f'%5B%22%22%2C%7B%22children%22%3A%5B%22s%22%2C%7B%22children%22%3A%5B%5B%22conversationId%22%2C%22{conversation_id}%22%2C%22d%22%5D%2C%7B%22children%22%3A%5B%22__PAGE__%22%2C%7B%7D%2C%22%2Fs%2F{conversation_id}%22%2C%22refresh%22%5D%7D%5D%7D%5D%7D%2Cnull%2Cnull%2Ctrue%5D',
+            "Origin": "https://www.deepfind.co",
+            "Referer": f"https://www.deepfind.co/s/{conversation_id}",
+            "Sec-Ch-Ua": '"Not A(Brand";v="8", "Chromium";v="132", "Microsoft Edge";v="132"',
+            "Sec-Ch-Ua-Mobile": "?0",
+            "Sec-Ch-Ua-Platform": '"Windows"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "User-Agent": LitAgent().random(),
+        }
+        self.session.headers.clear()
+        self.session.headers.update(headers)
+
+        def for_stream():
+            try:
+                with self.session.post(
+                    url,
+                    headers=headers,
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    proxies=self.proxies,
+                ) as response:
+                    response.raise_for_status()
+                    streaming_text = ""
+                    for line in response.iter_lines(decode_unicode=True):
+                        if line:
+                            # Try to extract content from the new streaming format
+                            content_matches = re.findall(r'"content":"([^"\\]*(?:\\.[^"\\]*)*)"', line)
+                            if content_matches:
+                                for content in content_matches:
+                                    if len(content) > len(streaming_text):
+                                        delta = content[len(streaming_text):]
+                                        streaming_text = content
+                                        delta = delta.replace('\\"', '"').replace('\\n', '\n')
+                                        delta = re.sub(r'\[REF\]\(https?://[^\s]*\)', '', delta)
+                                        if raw:
+                                            yield {"text": delta}
+                                        else:
+                                            yield Response(delta)
+                    self.last_response = Response(streaming_text)
+            except requests.exceptions.RequestException as e:
+                raise exceptions.APIConnectionError(f"Request failed: {e}")
+
+        def for_non_stream():
+            full_response = ""
+            for chunk in for_stream():
+                if raw:
+                    yield chunk
+                else:
+                    full_response += str(chunk)
+            if not raw:
+                self.last_response = Response(full_response)
+            return self.last_response
+        return for_stream() if stream else for_non_stream()
+
+    @staticmethod
+    def clean_content(text: str) -> str:
+        """Removes all webblock elements with research or detail classes.
+
+        Args:
+            text (str): The text to clean
+
+        Returns:
+            str: The cleaned text
+
+        Example:
+            >>> text = '<webblock class="research">...</webblock>Other text'
+            >>> cleaned_text = DeepFind.clean_content(text)
+            >>> print(cleaned_text)
+            Other text
+        """
+        cleaned_text = re.sub(
+            r'<webblock class="(?:research|detail)">[^<]*</webblock>', "", text
+        )
+        return cleaned_text
+
+
+if __name__ == "__main__":
+    from rich import print
+    ai = DeepFind()
+    response = ai.search(input(">>> "), stream=True, raw=False)
+    for chunk in response:
+        print(chunk, end="", flush=True)
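DeepFind replies can embed <webblock class="research"> and <webblock class="detail"> sections, which the clean_content helper strips after the fact. A small sketch that collects the stream and then cleans it, using the `from webscout import DeepFind` import shown in the class docstring:

from webscout import DeepFind

ai = DeepFind(timeout=60)
collected = ""
for chunk in ai.search("What is the webscout package?", stream=True):
    collected += str(chunk)               # each chunk is a Response wrapper around a text delta
print(DeepFind.clean_content(collected))  # drop embedded webblock research/detail sections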