webscout 8.2.7__py3-none-any.whl → 8.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +33 -15
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +703 -250
- webscout/Bard.py +441 -323
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +15 -0
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +333 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +202 -0
- webscout/Provider/AISEARCH/genspark_search.py +324 -0
- webscout/Provider/AISEARCH/hika_search.py +186 -0
- webscout/Provider/AISEARCH/iask_search.py +410 -0
- webscout/Provider/AISEARCH/monica_search.py +220 -0
- webscout/Provider/AISEARCH/scira_search.py +298 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +791 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +369 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +375 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +283 -0
- webscout/Provider/OPENAI/NEMOTRON.py +232 -0
- webscout/Provider/OPENAI/Qwen3.py +283 -0
- webscout/Provider/OPENAI/README.md +952 -0
- webscout/Provider/OPENAI/TwoAI.py +357 -0
- webscout/Provider/OPENAI/__init__.py +40 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +969 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +494 -0
- webscout/Provider/OPENAI/chatsandbox.py +173 -0
- webscout/Provider/OPENAI/copilot.py +242 -0
- webscout/Provider/OPENAI/deepinfra.py +322 -0
- webscout/Provider/OPENAI/e2b.py +1414 -0
- webscout/Provider/OPENAI/exaai.py +417 -0
- webscout/Provider/OPENAI/exachat.py +444 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +326 -0
- webscout/Provider/OPENAI/groq.py +364 -0
- webscout/Provider/OPENAI/heckai.py +308 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +389 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +357 -0
- webscout/Provider/OPENAI/oivscode.py +287 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +477 -0
- webscout/Provider/OPENAI/sonus.py +304 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +364 -0
- webscout/Provider/OPENAI/uncovrAI.py +463 -0
- webscout/Provider/OPENAI/utils.py +318 -0
- webscout/Provider/OPENAI/venice.py +431 -0
- webscout/Provider/OPENAI/wisecat.py +387 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +365 -0
- webscout/Provider/OPENAI/yep.py +382 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/README.md +82 -0
- webscout/Provider/TTI/__init__.py +7 -0
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +10 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +475 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +174 -0
- webscout/Provider/ai4chat.py +174 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +224 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/client.py +70 -0
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +404 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +210 -0
- webscout/scout/core/scout.py +607 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +478 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +269 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
- webscout-8.2.9.dist-info/RECORD +289 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
webscout/Provider/yep.py
ADDED
|
@@ -0,0 +1,389 @@
|
|
|
1
|
+
import uuid
|
|
2
|
+
import json
|
|
3
|
+
from curl_cffi import CurlError
|
|
4
|
+
from curl_cffi.requests import Session
|
|
5
|
+
|
|
6
|
+
from typing import Any, Dict, Optional, Generator, Union, List, TypeVar
|
|
7
|
+
|
|
8
|
+
from webscout.AIutel import Optimizers
|
|
9
|
+
from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
|
|
10
|
+
from webscout.AIbase import Provider
|
|
11
|
+
from webscout import exceptions
|
|
12
|
+
from webscout.litagent import LitAgent
|
|
13
|
+
from webscout.conversation import Conversation, Fn
|
|
14
|
+
|
|
15
|
+
T = TypeVar('T')
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class YEPCHAT(Provider):
    """
    YEPCHAT is a provider class for interacting with the Yep API.

    Attributes:
        AVAILABLE_MODELS (list): List of available models for the provider.
    """

    AVAILABLE_MODELS = ["DeepSeek-R1-Distill-Qwen-32B", "Mixtral-8x7B-Instruct-v0.1"]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 1280,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "DeepSeek-R1-Distill-Qwen-32B",
        temperature: float = 0.6,
        top_p: float = 0.7,
        browser: str = "chrome",
        tools: Optional[List[Fn]] = None,
    ):
        """
        Initializes the YEPCHAT provider with the specified parameters.

        Args:
            is_conversation: Keep conversational history between calls.
            max_tokens: Maximum tokens to sample from the model.
            timeout: Request timeout in seconds.
            intro: Optional conversation intro/system text.
            filepath: Optional path used to persist conversation history.
            update_file: Whether to update the history file.
            proxies: Optional mapping of proxies for the HTTP session.
            history_offset: Maximum history length kept in the prompt.
            act: Optional AwesomePrompts act/persona key.
            model: Model name; must be one of AVAILABLE_MODELS.
            temperature: Sampling temperature.
            top_p: Nucleus-sampling probability mass.
            browser: Browser name used to generate the fingerprint.
            tools: Optional list of Fn tool definitions for tool calling.

        Raises:
            ValueError: If *model* is not in AVAILABLE_MODELS.

        Examples:
            >>> ai = YEPCHAT()
            >>> ai.ask("What's the weather today?")
            Sends a prompt to the Yep API and returns the response.

            >>> ai.chat("Tell me a joke", stream=True)
            Initiates a chat with the Yep API using the provided prompt.

            >>> weather_tool = Fn(name="get_weather", description="Get the current weather", parameters={"location": "string"})
            >>> ai = YEPCHAT(tools=[weather_tool])
            >>> ai.chat("What's the weather in New York?")
            Uses the weather tool to provide weather information.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(
                f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
            )

        # curl_cffi Session (instead of cloudscraper) so requests can
        # impersonate a real browser TLS fingerprint.
        self.session = Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.chat_endpoint = "https://api.yep.com/v1/chat/completions"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.temperature = temperature
        self.top_p = top_p

        # Initialize LitAgent for user agent generation and use
        # fingerprinting to create a consistent browser identity.
        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Use the fingerprint for headers.
        self.headers = {
            "Accept": self.fingerprint["accept"],
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": self.fingerprint["accept_language"],
            "Content-Type": "application/json; charset=utf-8",
            "DNT": "1",
            "Origin": "https://yep.com",
            "Referer": "https://yep.com/",
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        }

        # Create session cookies with unique identifiers.
        self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}

        # NOTE: materialized as a tuple — a bare generator expression would be
        # exhausted after the first membership test, breaking later ask() calls
        # and rendering the error message as a generator repr.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method))
            and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(act, raise_not_found=True, default=None, case_insensitive=True)
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file, tools=tools
        )
        self.conversation.history_offset = history_offset
        # Set consistent headers and proxies for the curl_cffi session.
        self.session.headers.update(self.headers)
        # Avoid the mutable-default pitfall: build a fresh dict when omitted.
        self.session.proxies = proxies if proxies is not None else {}
        # Note: curl_cffi handles cookies differently, passed directly in requests.

    def refresh_identity(self, browser: str = None):
        """
        Refreshes the browser identity fingerprint.

        Args:
            browser: Specific browser to use for the new fingerprint
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Update headers with new fingerprint.
        self.headers.update({
            "Accept": self.fingerprint["accept"],
            "Accept-Language": self.fingerprint["accept_language"],
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        })

        # Update session headers.
        self.session.headers.update(self.headers)

        # Generate new cookies (will be passed in requests).
        self.cookies = {"__Host-session": uuid.uuid4().hex, '__cf_bm': uuid.uuid4().hex}

        return self.fingerprint

    def _send_request(self, data: Dict[str, Any], stream: bool):
        """POST *data* to the chat endpoint, retrying once with a refreshed
        identity on 403/429.

        Returns the successful response; raises FailedToGenerateResponseError
        on any non-OK status.
        """
        post_kwargs = dict(
            headers=self.headers,
            cookies=self.cookies,  # curl_cffi takes cookies per-request
            json=data,
            stream=stream,
            timeout=self.timeout,
            impersonate=self.fingerprint.get("browser_type", "chrome110"),
        )
        response = self.session.post(self.chat_endpoint, **post_kwargs)
        if response.ok:
            return response

        if response.status_code in [403, 429]:
            # Likely bot-blocked or rate-limited: refresh identity and retry once.
            self.refresh_identity()
            post_kwargs["headers"] = self.headers
            post_kwargs["cookies"] = self.cookies
            post_kwargs["impersonate"] = self.fingerprint.get("browser_type", "chrome110")
            response = self.session.post(self.chat_endpoint, **post_kwargs)
            if response.ok:
                return response
            raise exceptions.FailedToGenerateResponseError(
                f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {response.text}"
            )

        raise exceptions.FailedToGenerateResponseError(
            f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
        )

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """
        Sends a prompt to the Yep API and returns the response.
        Now supports tool calling functionality.

        Args:
            prompt: The user prompt to send.
            stream: Stream the response instead of returning it whole.
            raw: When streaming, yield raw text chunks instead of dicts.
            optimizer: Optional name of an Optimizers method to apply.
            conversationally: Apply the optimizer to the full conversation
                prompt rather than the bare prompt.

        Raises:
            exceptions.FailedToGenerateResponseError: On request failure.

        Examples:
            >>> ai = YEPCHAT()
            >>> ai.ask("What's the weather today?")
            Returns the response from the Yep API.

            >>> ai.ask("Tell me a joke", stream=True)
            Streams the response from the Yep API.

            >>> weather_tool = Fn(name="get_weather", description="Get the current weather", parameters={"location": "string"})
            >>> ai = YEPCHAT(tools=[weather_tool])
            >>> ai.ask("What's the weather in New York?")
            Will use the weather tool to provide response.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        data = {
            "stream": stream,
            "max_tokens": self.max_tokens_to_sample,
            "top_p": self.top_p,
            "temperature": self.temperature,
            "messages": [{"content": conversation_prompt, "role": "user"}],
            "model": self.model,
        }

        def for_stream():
            try:
                response = self._send_request(data, stream=True)

                streaming_text = ""

                # Use sanitize_stream to decode the SSE-style 'data:' lines.
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),  # byte iterator
                    intro_value="data:",
                    to_json=True,  # Yep sends JSON after 'data:'
                    skip_markers=["[DONE]"],  # Skip the final marker
                    yield_raw_on_error=False,  # Only process valid JSON data
                    # Pull just the delta text out of each chunk.
                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
                )
                # The loop yields the extracted string content directly.
                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        # Yield dict or raw string chunk based on 'raw' flag.
                        yield dict(text=content_chunk) if not raw else content_chunk

                # Tool-call detection happens after the full stream is collected.
                response_data = self.conversation.handle_tool_response(streaming_text)

                if response_data["is_tool_call"]:
                    if response_data["success"]:
                        for tool_call in response_data.get("tool_calls", []):
                            tool_name = tool_call.get("name", "unknown_tool")
                            result = response_data["result"]
                            self.conversation.update_chat_history_with_tool(prompt, tool_name, result)
                    else:
                        # If tool call failed, update history with error.
                        self.conversation.update_chat_history(prompt,
                            f"Error executing tool call: {response_data['result']}")
                else:
                    # Normal response handling.
                    self.conversation.update_chat_history(prompt, streaming_text)

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
            except exceptions.FailedToGenerateResponseError:
                raise  # already descriptive; don't double-wrap the message
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        def for_non_stream():
            try:
                response = self._send_request(data, stream=False)

                response_data = response.json()
                if 'choices' in response_data and len(response_data['choices']) > 0:
                    content = response_data['choices'][0].get('message', {}).get('content', '')

                    # Check if the response contains a tool call.
                    tool_response = self.conversation.handle_tool_response(content)

                    if tool_response["is_tool_call"]:
                        if tool_response["success"]:
                            # Get the first tool call for simplicity.
                            if "tool_calls" in tool_response and len(tool_response["tool_calls"]) > 0:
                                tool_call = tool_response["tool_calls"][0]
                                tool_name = tool_call.get("name", "unknown_tool")
                                tool_result = tool_response["result"]

                                self.conversation.update_chat_history_with_tool(prompt, tool_name, tool_result)

                                return {"text": tool_result, "is_tool_call": True, "tool_name": tool_name}

                        # If tool call processing failed.
                        return {"text": tool_response["result"], "is_tool_call": True, "error": True}
                    else:
                        # Normal response handling.
                        self.conversation.update_chat_history(prompt, content)
                        return {"text": content}
                else:
                    raise exceptions.FailedToGenerateResponseError("No response content found")
            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
            except exceptions.FailedToGenerateResponseError:
                raise  # already descriptive; don't double-wrap the message
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """
        Initiates a chat with the Yep API using the provided prompt.

        Examples:
            >>> ai = YEPCHAT()
            >>> ai.chat("Tell me a joke")
            Returns the chat response from the Yep API.

            >>> ai.chat("What's the weather today?", stream=True)
            Streams the chat response from the Yep API.
        """
        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """
        Extracts the message content from the API response.

        Examples:
            >>> ai = YEPCHAT()
            >>> response = ai.ask("Tell me a joke")
            >>> ai.get_message(response)
            Extracts and returns the message content from the response.
        """
        assert isinstance(response, dict)
        return response["text"]
|
|
366
|
+
|
|
367
|
+
|
|
368
|
+
if __name__ == "__main__":
    # Smoke-test every available model and print a one-line status report.
    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model in YEPCHAT.AVAILABLE_MODELS:
        try:
            client = YEPCHAT(model=model, timeout=60)
            reply = client.chat("Say 'Hello' in one word")
            trimmed = reply.strip() if reply else ""

            if trimmed:
                status = "✓"
                # Truncate overly long responses for the report column.
                display_text = trimmed[:50] + "..." if len(trimmed) > 50 else trimmed
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"{model:<50} {status:<10} {display_text}")
        except Exception as e:
            print(f"{model:<50} {'✗':<10} {str(e)}")
|
|
389
|
+
|
webscout/__init__.py
CHANGED
|
@@ -26,9 +26,11 @@ __repo__ = "https://github.com/OE-LUCIFER/Webscout"
|
|
|
26
26
|
# Add update checker
|
|
27
27
|
from .update_checker import check_for_updates
|
|
28
28
|
try:
|
|
29
|
-
check_for_updates()
|
|
29
|
+
update_message = check_for_updates()
|
|
30
|
+
if update_message:
|
|
31
|
+
print(update_message)
|
|
30
32
|
except Exception:
|
|
31
|
-
pass # Silently handle any update check errors
|
|
33
|
+
pass # Silently handle any update check errors
|
|
32
34
|
|
|
33
35
|
import logging
|
|
34
36
|
logging.getLogger("webscout").addHandler(logging.NullHandler())
|
webscout/cli.py
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
import sys
|
|
2
2
|
from .swiftcli import CLI, option
|
|
3
|
-
from .webscout_search import WEBS
|
|
3
|
+
from .webscout_search import WEBS # Import the WEBS class from webscout_search
|
|
4
4
|
from .DWEBS import GoogleSearch # Import GoogleSearch from DWEBS
|
|
5
5
|
from .yep_search import YepSearch # Import YepSearch from yep_search
|
|
6
6
|
from .version import __version__
|
|
@@ -48,32 +48,7 @@ def version():
|
|
|
48
48
|
"""Show the version of webscout."""
|
|
49
49
|
print(f"webscout version: {__version__}")
|
|
50
50
|
|
|
51
|
-
|
|
52
|
-
@option("--proxy", help="Proxy URL to use for requests")
|
|
53
|
-
@option("--model", "-m", help="AI model to use", default="gpt-4o-mini", type=str)
|
|
54
|
-
@option("--timeout", "-t", help="Timeout value for requests", type=int, default=10)
|
|
55
|
-
def chat(proxy: str = None, model: str = "gpt-4o-mini", timeout: int = 10):
|
|
56
|
-
"""Interactive AI chat using DuckDuckGo's AI."""
|
|
57
|
-
webs = WEBS(proxy=proxy, timeout=timeout)
|
|
58
|
-
|
|
59
|
-
print(f"Using model: {model}")
|
|
60
|
-
print("Type your message and press Enter. Press Ctrl+C or type 'exit' to quit.\n")
|
|
61
|
-
|
|
62
|
-
try:
|
|
63
|
-
while True:
|
|
64
|
-
try:
|
|
65
|
-
user_input = input(">>> ").strip()
|
|
66
|
-
if not user_input or user_input.lower() in ['exit', 'quit']:
|
|
67
|
-
break
|
|
68
|
-
|
|
69
|
-
response = webs.chat(keywords=user_input, model=model)
|
|
70
|
-
print(f"\nAI: {response}\n")
|
|
71
|
-
|
|
72
|
-
except Exception as e:
|
|
73
|
-
print(f"Error: {str(e)}\n")
|
|
74
|
-
|
|
75
|
-
except KeyboardInterrupt:
|
|
76
|
-
print("\nChat session interrupted. Exiting...")
|
|
51
|
+
|
|
77
52
|
|
|
78
53
|
@app.command()
|
|
79
54
|
@option("--keywords", "-k", help="Search keywords", required=True)
|
|
@@ -546,4 +521,4 @@ def main():
|
|
|
546
521
|
sys.exit(1)
|
|
547
522
|
|
|
548
523
|
if __name__ == "__main__":
|
|
549
|
-
main()
|
|
524
|
+
main()
|
webscout/client.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Unified client import for OpenAI-compatible providers and server utilities.
|
|
3
|
+
|
|
4
|
+
This module provides a unified import interface for all OpenAI-compatible providers and exposes
|
|
5
|
+
helper functions to start the OpenAI-compatible API server programmatically.
|
|
6
|
+
|
|
7
|
+
Usage:
|
|
8
|
+
from webscout.client import FreeAIChat, AI4Chat, ExaChat, MultiChatAI, TwoAI, SciraChat, ChatSandbox, C4AI, Groq, TypeGPT, LLMChat, Cleeai, DeepInfra, BlackboxAI, Cloudflare, Netwrck, OIVSCode, Venice, Writecream, Wisecat, Yep, X0GPT, Sonus, Toolbaz, TextPollinations, StandardInput, Opkfc, Flowith, ExaAI, FreeGemini, GeminiAPI, Gemini, GithubChat, GizAI, Glider, HeckAI, HuggingFaceChat, Hunyuan, Jadve, Julius, Koala, LearnFast, LearnFastAI, NEMOTRON, MCPCore, PydanticImports, TeachAnything, UncovrAI, API, Base
|
|
9
|
+
client = FreeAIChat()
|
|
10
|
+
response = client.chat.completions.create(...)
|
|
11
|
+
|
|
12
|
+
# To start the server programmatically:
|
|
13
|
+
from webscout.client import start_server
|
|
14
|
+
start_server()
|
|
15
|
+
|
|
16
|
+
# For advanced server control:
|
|
17
|
+
from webscout.client import run_api
|
|
18
|
+
run_api(host="0.0.0.0", port=8000, debug=True)
|
|
19
|
+
|
|
20
|
+
# Instantly start the API server from the command line:
|
|
21
|
+
# python -m webscout.client
|
|
22
|
+
# or, if installed as a script:
|
|
23
|
+
# webscout-api
|
|
24
|
+
|
|
25
|
+
Exports:
|
|
26
|
+
- All OpenAI-compatible provider classes
|
|
27
|
+
- start_server (function): Start the OpenAI-compatible API server with default or custom settings.
|
|
28
|
+
- run_api (function): Advanced server startup with full control over host, port, and other options.
|
|
29
|
+
"""
|
|
30
|
+
|
|
31
|
+
from webscout.Provider.OPENAI import *
|
|
32
|
+
from webscout.Provider.OPENAI.api import start_server, run_api
|
|
33
|
+
|
|
34
|
+
# ---
|
|
35
|
+
# API Documentation
|
|
36
|
+
#
|
|
37
|
+
# start_server
|
|
38
|
+
# -------------
|
|
39
|
+
# def start_server(port: int = 8000, api_key: str = None, default_provider: str = None, base_url: str = None):
|
|
40
|
+
# """
|
|
41
|
+
# Start the OpenAI-compatible API server with optional configuration.
|
|
42
|
+
#
|
|
43
|
+
# Parameters:
|
|
44
|
+
# port (int, optional): The port to run the server on. Defaults to 8000.
|
|
45
|
+
# api_key (str, optional): API key for authentication. If None, authentication is disabled.
|
|
46
|
+
# default_provider (str, optional): The default provider to use. If None, uses the package default.
|
|
47
|
+
# base_url (str, optional): Base URL prefix for the API (e.g., '/api/v1'). If None, no prefix is used.
|
|
48
|
+
#
|
|
49
|
+
# Returns:
|
|
50
|
+
# None
|
|
51
|
+
# """
|
|
52
|
+
#
|
|
53
|
+
# run_api
|
|
54
|
+
# -------
|
|
55
|
+
# def run_api(host: str = '0.0.0.0', port: int = None, api_key: str = None, default_provider: str = None, base_url: str = None, debug: bool = False, show_available_providers: bool = True):
|
|
56
|
+
# """
|
|
57
|
+
# Advanced server startup for the OpenAI-compatible API server.
|
|
58
|
+
#
|
|
59
|
+
# Parameters:
|
|
60
|
+
# host (str, optional): Host address to bind the server. Defaults to '0.0.0.0'.
|
|
61
|
+
# port (int, optional): Port to run the server on. Defaults to 8000 if not specified.
|
|
62
|
+
# api_key (str, optional): API key for authentication. If None, authentication is disabled.
|
|
63
|
+
# default_provider (str, optional): The default provider to use. If None, uses the package default.
|
|
64
|
+
# base_url (str, optional): Base URL prefix for the API (e.g., '/api/v1'). If None, no prefix is used.
|
|
65
|
+
# debug (bool, optional): Run the server in debug mode with auto-reload. Defaults to False.
|
|
66
|
+
# show_available_providers (bool, optional): Print available providers on startup. Defaults to True.
|
|
67
|
+
#
|
|
68
|
+
# Returns:
|
|
69
|
+
# None
|
|
70
|
+
# """
|
webscout/conversation.py
CHANGED
|
@@ -1,6 +1,24 @@
|
|
|
1
|
+
"""
|
|
2
|
+
conversation.py
|
|
3
|
+
|
|
4
|
+
This module provides a modern conversation manager for handling chat-based interactions, message history, tool calls, and robust error handling. It defines the Conversation class and supporting types for managing conversational state, tool integration, and message validation.
|
|
5
|
+
|
|
6
|
+
Classes:
|
|
7
|
+
ConversationError: Base exception for conversation-related errors.
|
|
8
|
+
ToolCallError: Raised when there's an error with tool calls.
|
|
9
|
+
MessageValidationError: Raised when message validation fails.
|
|
10
|
+
Message: Represents a single message in the conversation.
|
|
11
|
+
FunctionCall: TypedDict for a function call.
|
|
12
|
+
ToolDefinition: TypedDict for a tool definition.
|
|
13
|
+
FunctionCallData: TypedDict for function call data.
|
|
14
|
+
Fn: Represents a function (tool) that the agent can call.
|
|
15
|
+
Conversation: Main conversation manager class.
|
|
16
|
+
|
|
17
|
+
Functions:
|
|
18
|
+
tools: Decorator to mark a function as a tool.
|
|
19
|
+
"""
|
|
1
20
|
import os
|
|
2
21
|
import json
|
|
3
|
-
import logging
|
|
4
22
|
from typing import Optional, Dict, List, Any, TypedDict, Callable, TypeVar, Union
|
|
5
23
|
from dataclasses import dataclass
|
|
6
24
|
from datetime import datetime
|
|
@@ -59,14 +77,15 @@ def tools(func: Callable[..., T]) -> Callable[..., T]:
|
|
|
59
77
|
return func
|
|
60
78
|
|
|
61
79
|
class Conversation:
|
|
62
|
-
"""
|
|
63
|
-
|
|
80
|
+
"""
|
|
81
|
+
Modern conversation manager with enhanced features.
|
|
82
|
+
|
|
64
83
|
Key Features:
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
84
|
+
- Robust message handling with metadata
|
|
85
|
+
- Enhanced tool calling support
|
|
86
|
+
- Efficient history management
|
|
87
|
+
- Improved error handling
|
|
88
|
+
- Memory optimization
|
|
70
89
|
"""
|
|
71
90
|
|
|
72
91
|
intro = (
|
|
@@ -95,23 +114,8 @@ class Conversation:
|
|
|
95
114
|
self.prompt_allowance = 10
|
|
96
115
|
self.tools = tools or []
|
|
97
116
|
self.compression_threshold = compression_threshold
|
|
98
|
-
self.logger = self._setup_logger()
|
|
99
|
-
|
|
100
117
|
if filepath:
|
|
101
|
-
self.load_conversation(filepath,
|
|
102
|
-
|
|
103
|
-
def _setup_logger(self) -> logging.Logger:
|
|
104
|
-
"""Set up enhanced logging."""
|
|
105
|
-
logger = logging.getLogger("conversation")
|
|
106
|
-
if not logger.handlers:
|
|
107
|
-
handler = logging.StreamHandler()
|
|
108
|
-
formatter = logging.Formatter(
|
|
109
|
-
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
|
110
|
-
)
|
|
111
|
-
handler.setFormatter(formatter)
|
|
112
|
-
logger.addHandler(handler)
|
|
113
|
-
logger.setLevel(logging.INFO)
|
|
114
|
-
return logger
|
|
118
|
+
self.load_conversation(filepath, True)
|
|
115
119
|
|
|
116
120
|
def load_conversation(self, filepath: str, exists: bool = True) -> None:
|
|
117
121
|
"""Load conversation with improved error handling."""
|
|
@@ -132,7 +136,6 @@ class Conversation:
|
|
|
132
136
|
self.intro = file_contents[0]
|
|
133
137
|
self._process_history_from_file(file_contents[1:])
|
|
134
138
|
except Exception as e:
|
|
135
|
-
self.logger.error(f"Error loading conversation: {str(e)}")
|
|
136
139
|
raise ConversationError(f"Failed to load conversation: {str(e)}") from e
|
|
137
140
|
|
|
138
141
|
def _process_history_from_file(self, lines: List[str]) -> None:
|
|
@@ -272,6 +275,7 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
|
|
|
272
275
|
def add_message(self, role: str, content: str, metadata: Optional[Dict[str, Any]] = None) -> None:
|
|
273
276
|
"""Add a message with enhanced validation and metadata support."""
|
|
274
277
|
try:
|
|
278
|
+
role = role.lower() # Normalize role to lowercase
|
|
275
279
|
if not self.validate_message(role, content):
|
|
276
280
|
raise MessageValidationError("Invalid message role or content")
|
|
277
281
|
|
|
@@ -284,7 +288,6 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
|
|
|
284
288
|
self._compress_history()
|
|
285
289
|
|
|
286
290
|
except Exception as e:
|
|
287
|
-
self.logger.error(f"Error adding message: {str(e)}")
|
|
288
291
|
raise ConversationError(f"Failed to add message: {str(e)}") from e
|
|
289
292
|
|
|
290
293
|
def _append_to_file(self, message: Message) -> None:
|
|
@@ -299,17 +302,17 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
|
|
|
299
302
|
fh.write(f"\n{role_display}: {message.content}")
|
|
300
303
|
|
|
301
304
|
except Exception as e:
|
|
302
|
-
self.logger.error(f"Error writing to file: {str(e)}")
|
|
303
305
|
raise ConversationError(f"Failed to write to file: {str(e)}") from e
|
|
304
306
|
|
|
305
307
|
def validate_message(self, role: str, content: str) -> bool:
|
|
306
308
|
"""Validate message with enhanced role checking."""
|
|
307
309
|
valid_roles = {'user', 'assistant', 'tool', 'system'}
|
|
308
310
|
if role not in valid_roles:
|
|
309
|
-
self.logger.error(f"Invalid role: {role}")
|
|
310
311
|
return False
|
|
311
|
-
if not
|
|
312
|
-
|
|
312
|
+
if not isinstance(content, str):
|
|
313
|
+
return False
|
|
314
|
+
# Allow empty content for assistant (needed for streaming)
|
|
315
|
+
if not content and role != 'assistant':
|
|
313
316
|
return False
|
|
314
317
|
return True
|
|
315
318
|
|
|
@@ -345,7 +348,6 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
|
|
|
345
348
|
}
|
|
346
349
|
|
|
347
350
|
except Exception as e:
|
|
348
|
-
self.logger.error(f"Error handling tool response: {str(e)}")
|
|
349
351
|
raise ToolCallError(f"Failed to handle tool response: {str(e)}") from e
|
|
350
352
|
|
|
351
353
|
def _parse_function_call(self, response: str) -> FunctionCallData:
|
|
@@ -381,7 +383,6 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
|
|
|
381
383
|
raise
|
|
382
384
|
|
|
383
385
|
except Exception as e:
|
|
384
|
-
self.logger.error(f"Error parsing function call: {str(e)}")
|
|
385
386
|
return {"error": str(e)}
|
|
386
387
|
|
|
387
388
|
def execute_function(self, function_call_data: FunctionCallData) -> str:
|
|
@@ -405,7 +406,6 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
|
|
|
405
406
|
return "; ".join(results)
|
|
406
407
|
|
|
407
408
|
except Exception as e:
|
|
408
|
-
self.logger.error(f"Error executing function: {str(e)}")
|
|
409
409
|
raise ToolCallError(f"Failed to execute function: {str(e)}") from e
|
|
410
410
|
|
|
411
411
|
def get_tools_description(self) -> str:
|
|
@@ -428,9 +428,9 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
|
|
|
428
428
|
This method adds both the user's prompt and the assistant's response
|
|
429
429
|
to the conversation history as separate messages.
|
|
430
430
|
"""
|
|
431
|
-
# Add user's message
|
|
431
|
+
# Add user's message (normalize role)
|
|
432
432
|
self.add_message("user", prompt)
|
|
433
433
|
|
|
434
|
-
# Add assistant's response
|
|
434
|
+
# Add assistant's response (normalize role)
|
|
435
435
|
self.add_message("assistant", response)
|
|
436
436
|
|