webscout-8.3.7-py3-none-any.whl → webscout-2025.10.11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -60
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +16 -1
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -316
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -314
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -343
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -208
- webscout/Provider/Deepinfra.py +370 -366
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -308
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -164
- webscout/Provider/GeminiProxy.py +167 -167
- webscout/Provider/GithubChat.py +371 -372
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -383
- webscout/Provider/Jadve.py +282 -282
- webscout/Provider/K2Think.py +307 -307
- webscout/Provider/Koboldai.py +205 -205
- webscout/Provider/LambdaChat.py +423 -423
- webscout/Provider/Nemotron.py +244 -244
- webscout/Provider/Netwrck.py +248 -248
- webscout/Provider/OLLAMA.py +395 -395
- webscout/Provider/OPENAI/Cloudflare.py +393 -393
- webscout/Provider/OPENAI/FalconH1.py +451 -451
- webscout/Provider/OPENAI/FreeGemini.py +296 -296
- webscout/Provider/OPENAI/K2Think.py +431 -431
- webscout/Provider/OPENAI/NEMOTRON.py +240 -240
- webscout/Provider/OPENAI/PI.py +427 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +465 -465
- webscout/Provider/OPENAI/__init__.py +33 -18
- webscout/Provider/OPENAI/base.py +248 -248
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +592 -592
- webscout/Provider/OPENAI/chatgptclone.py +521 -521
- webscout/Provider/OPENAI/chatsandbox.py +202 -202
- webscout/Provider/OPENAI/deepinfra.py +318 -314
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -314
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -355
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -535
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -340
- webscout/Provider/OPENAI/toolbaz.py +419 -416
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +243 -243
- webscout/Provider/PI.py +405 -405
- webscout/Provider/Perplexitylabs.py +430 -430
- webscout/Provider/QwenLM.py +272 -272
- webscout/Provider/STT/__init__.py +16 -1
- webscout/Provider/Sambanova.py +257 -257
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -18
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -18
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -237
- webscout/Provider/TextPollinationsAI.py +310 -310
- webscout/Provider/TogetherAI.py +356 -356
- webscout/Provider/TwoAI.py +312 -312
- webscout/Provider/TypliAI.py +311 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/UNFINISHED/GizAI.py +294 -294
- webscout/Provider/UNFINISHED/Marcus.py +198 -198
- webscout/Provider/UNFINISHED/Qodo.py +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/UNFINISHED/XenAI.py +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +250 -250
- webscout/Provider/VercelAI.py +256 -256
- webscout/Provider/WiseCat.py +231 -231
- webscout/Provider/WrDoChat.py +366 -366
- webscout/Provider/__init__.py +33 -18
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -331
- webscout/Provider/cerebras.py +446 -446
- webscout/Provider/chatglm.py +394 -301
- webscout/Provider/cleeai.py +211 -211
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -263
- webscout/Provider/julius.py +223 -223
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +383 -383
- webscout/Provider/searchchat.py +292 -292
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -367
- webscout/Provider/turboseek.py +273 -273
- webscout/Provider/typefully.py +207 -207
- webscout/Provider/yep.py +372 -372
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -937
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/OPENAI/Qwen3.py +0 -303
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/tempid.py +0 -134
- webscout-8.3.7.dist-info/RECORD +0 -301
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
webscout/conversation.py
CHANGED
@@ -1,438 +1,438 @@

Both sides of this hunk contain the same 438 lines (the file was re-emitted in full); the file content follows:

```python
"""
conversation.py

This module provides a modern conversation manager for handling chat-based interactions, message history, tool calls, and robust error handling. It defines the Conversation class and supporting types for managing conversational state, tool integration, and message validation.

Classes:
    ConversationError: Base exception for conversation-related errors.
    ToolCallError: Raised when there's an error with tool calls.
    MessageValidationError: Raised when message validation fails.
    Message: Represents a single message in the conversation.
    FunctionCall: TypedDict for a function call.
    ToolDefinition: TypedDict for a tool definition.
    FunctionCallData: TypedDict for function call data.
    Fn: Represents a function (tool) that the agent can call.
    Conversation: Main conversation manager class.

Functions:
    tools: Decorator to mark a function as a tool.
"""
import os
import json
from typing import Optional, Dict, List, Any, TypedDict, Callable, TypeVar, Union
from dataclasses import dataclass
from datetime import datetime

T = TypeVar('T')

class ConversationError(Exception):
    """Base exception for conversation-related errors."""
    pass

class ToolCallError(ConversationError):
    """Raised when there's an error with tool calls."""
    pass

class MessageValidationError(ConversationError):
    """Raised when message validation fails."""
    pass

@dataclass
class Message:
    """Represents a single message in the conversation."""
    role: str
    content: str
    timestamp: datetime = datetime.now()
    metadata: Dict[str, Any] = None

    def __post_init__(self):
        if self.metadata is None:
            self.metadata = {}

class FunctionCall(TypedDict):
    """Type for a function call."""
    name: str
    arguments: Dict[str, Any]

class ToolDefinition(TypedDict):
    """Type for a tool definition."""
    type: str
    function: Dict[str, Any]

class FunctionCallData(TypedDict, total=False):
    """Type for function call data"""
    tool_calls: List[FunctionCall]
    error: str

class Fn:
    """Represents a function (tool) that the agent can call."""
    def __init__(self, name: str, description: str, parameters: Dict[str, str]) -> None:
        self.name: str = name
        self.description: str = description
        self.parameters: Dict[str, str] = parameters

def tools(func: Callable[..., T]) -> Callable[..., T]:
    """Decorator to mark a function as a tool."""
    func._is_tool = True  # type: ignore
    return func

class Conversation:
    """
    Modern conversation manager with enhanced features.

    Key Features:
    - Robust message handling with metadata
    - Enhanced tool calling support
    - Efficient history management
    - Improved error handling
    - Memory optimization
    """

    intro = (
        "You're a helpful Large Language Model assistant. "
        "Respond directly to the user's questions or use tools when appropriate."
    )

    def __init__(
        self,
        status: bool = True,
        max_tokens: int = 600,
        filepath: Optional[str] = None,
        update_file: bool = True,
        tools: Optional[List[Fn]] = None,
        compression_threshold: int = 10000,
    ):
        """Initialize conversation manager with modern features."""
        self.status = status
        self.max_tokens_to_sample = max_tokens
        self.messages: List[Message] = []
        self.history_format = "\nUser: %(user)s\nAssistant: %(llm)s"
        self.tool_history_format = "\nUser: %(user)s\nAssistant: <tool_call>%(tool_json)s</tool_call>\nTool: %(result)s"
        self.file = filepath
        self.update_file = update_file
        self.history_offset = 10250
        self.prompt_allowance = 10
        self.tools = tools or []
        self.compression_threshold = compression_threshold
        if filepath:
            self.load_conversation(filepath, True)

    def load_conversation(self, filepath: str, exists: bool = True) -> None:
        """Load conversation with improved error handling."""
        try:
            if not isinstance(filepath, str):
                raise TypeError(f"Filepath must be str, not {type(filepath)}")

            if exists and not os.path.isfile(filepath):
                raise FileNotFoundError(f"File '{filepath}' does not exist")

            if not os.path.isfile(filepath):
                with open(filepath, "w", encoding="utf-8") as fh:
                    fh.write(self.intro)
            else:
                with open(filepath, encoding="utf-8") as fh:
                    file_contents = fh.readlines()
                    if file_contents:
                        self.intro = file_contents[0]
                    self._process_history_from_file(file_contents[1:])
        except Exception as e:
            raise ConversationError(f"Failed to load conversation: {str(e)}") from e

    def _process_history_from_file(self, lines: List[str]) -> None:
        """Process and structure conversation history from file."""
        current_role = None
        current_content = []

        for line in lines:
            line = line.strip()
            if line.startswith(("User:", "Assistant:", "Tool:")):
                if current_role and current_content:
                    self.messages.append(Message(
                        role=current_role,
                        content="\n".join(current_content)
                    ))
                    current_content = []
                current_role = line.split(":")[0].lower()
                content = ":".join(line.split(":")[1:]).strip()
                current_content.append(content)
            elif line:
                current_content.append(line)

        if current_role and current_content:
            self.messages.append(Message(
                role=current_role,
                content="\n".join(current_content)
            ))

    def _compress_history(self) -> None:
        """Delete old history when it exceeds threshold."""
        if len(self.messages) > self.compression_threshold:
            # Remove oldest messages, keep only the most recent ones
            self.messages = self.messages[-self.compression_threshold:]

    # _summarize_messages removed

    def gen_complete_prompt(self, prompt: str, intro: Optional[str] = None) -> str:
        """Generate complete prompt with enhanced context management."""
        if not self.status:
            return prompt

        intro = intro or self.intro or ""

        # Add tool information if available
        tools_description = self.get_tools_description()
        if tools_description:
            try:
                date_str = f"Current date: {datetime.now().strftime('%d %b %Y')}"
            except:
                date_str = ""

            intro = self._generate_enhanced_intro(intro, tools_description, date_str)

        # Generate history string with proper formatting
        history = self._generate_history_string()

        # Combine and trim if needed
        complete_prompt = intro + self._trim_chat_history(
            history + "\nUser: " + prompt + "\nAssistant:",
            intro
        )

        return complete_prompt

    def _generate_enhanced_intro(self, intro: str, tools_description: str, date_str: str) -> str:
        """Generate enhanced introduction with tools and guidelines."""
        return f'''
{intro}

{date_str}

**CORE PROTOCOL:**

Your goal is to assist the user effectively. Analyze each query and choose one of two response modes:

**1. Tool Mode:**
- **When:** If the query requires external data, calculations, or functions listed under AVAILABLE TOOLS.
- **Action:** Output *ONLY* the complete JSON tool call within tags.
- **Format:** Must start with `<tool_call>` and end with `</tool_call>`.

**2. Conversational Mode:**
- **When:** For queries answerable with internal knowledge.
- **Action:** Respond directly and concisely.

**AVAILABLE TOOLS:**
{tools_description}

**TOOL FORMAT:**
<tool_call>
{{
    "name": "tool_name",
    "arguments": {{
        "param": "value"
    }}
}}
</tool_call>
'''

    def _generate_history_string(self) -> str:
        """Generate formatted history string from messages."""
        history_parts = []
        for msg in self.messages:
            if msg.role == "system" and msg.metadata.get("summarized_count"):
                history_parts.append(f"[Previous messages summarized: {msg.metadata['summarized_count']}]")
            else:
                role_display = msg.role.capitalize()
                if "<tool_call>" in msg.content:
                    history_parts.append(f"{role_display}: {msg.content}")
                else:
                    history_parts.append(f"{role_display}: {msg.content}")
        return "\n".join(history_parts)

    def _trim_chat_history(self, chat_history: str, intro: str) -> str:
        """Trim chat history with improved token management."""
        intro = intro or ""
        total_length = len(intro) + len(chat_history)

        if total_length > self.history_offset:
            truncate_at = (total_length - self.history_offset) + self.prompt_allowance
            # Try to truncate at a message boundary
            lines = chat_history[truncate_at:].split('\n')
            for i, line in enumerate(lines):
                if line.startswith(("User:", "Assistant:", "Tool:")):
                    return "... " + "\n".join(lines[i:])
            return "... " + chat_history[truncate_at:]
        return chat_history

    def add_message(self, role: str, content: str, metadata: Optional[Dict[str, Any]] = None) -> None:
        """Add a message with enhanced validation and metadata support. Deletes oldest messages if total word count exceeds max_tokens_to_sample."""
        try:
            role = role.lower()  # Normalize role to lowercase
            if not self.validate_message(role, content):
                raise MessageValidationError("Invalid message role or content")

            # Calculate total word count in history
            def total_word_count(messages):
                return sum(len(msg.content.split()) for msg in messages)

            # Remove oldest messages until total word count is below limit
            temp_messages = self.messages.copy()
            while temp_messages and (total_word_count(temp_messages) + len(content.split()) > self.max_tokens_to_sample):
                temp_messages.pop(0)

            self.messages = temp_messages

            message = Message(role=role, content=content, metadata=metadata or {})
            self.messages.append(message)

            if self.file and self.update_file:
                self._append_to_file(message)

            self._compress_history()

        except Exception as e:
            raise ConversationError(f"Failed to add message: {str(e)}") from e

    def _append_to_file(self, message: Message) -> None:
        """Append message to file with error handling."""
        try:
            if not os.path.exists(self.file):
                with open(self.file, "w", encoding="utf-8") as fh:
                    fh.write(self.intro + "\n")

            with open(self.file, "a", encoding="utf-8") as fh:
                role_display = message.role.capitalize()
                fh.write(f"\n{role_display}: {message.content}")

        except Exception as e:
            raise ConversationError(f"Failed to write to file: {str(e)}") from e

    def validate_message(self, role: str, content: str) -> bool:
        """Validate message with enhanced role checking."""
        valid_roles = {'user', 'assistant', 'tool', 'system'}
        if role not in valid_roles:
            return False
        if not isinstance(content, str):
            return False
        # Allow empty content for assistant (needed for streaming)
        if not content and role != 'assistant':
            return False
        return True

    def handle_tool_response(self, response: str) -> Dict[str, Any]:
        """Process tool responses with enhanced error handling."""
        try:
            if "<tool_call>" in response:
                function_call_data = self._parse_function_call(response)

                if "error" in function_call_data:
                    return {
                        "is_tool_call": True,
                        "success": False,
                        "result": function_call_data["error"],
                        "original_response": response
                    }

                result = self.execute_function(function_call_data)
                self.add_message("tool", result)

                return {
                    "is_tool_call": True,
                    "success": True,
                    "result": result,
                    "tool_calls": function_call_data.get("tool_calls", []),
                    "original_response": response
                }

            return {
                "is_tool_call": False,
                "result": response,
                "original_response": response
            }

        except Exception as e:
            raise ToolCallError(f"Failed to handle tool response: {str(e)}") from e

    def _parse_function_call(self, response: str) -> FunctionCallData:
        """Parse function calls with improved JSON handling."""
        try:
            # Extract content between tool call tags
            start_tag = "<tool_call>"
            end_tag = "</tool_call>"
            start_idx = response.find(start_tag)
            end_idx = response.rfind(end_tag)

            if start_idx == -1 or end_idx == -1:
                raise ValueError("No valid tool call tags found")

            json_str = response[start_idx + len(start_tag):end_idx].strip()

            # Handle both single and multiple tool calls
            try:
                parsed = json.loads(json_str)
                if isinstance(parsed, dict):
                    return {"tool_calls": [parsed]}
                elif isinstance(parsed, list):
                    return {"tool_calls": parsed}
                else:
                    raise ValueError("Invalid tool call structure")
            except json.JSONDecodeError:
                # Try to extract valid JSON if embedded in other content
                import re
                json_pattern = re.search(r'\{[\s\S]*\}', json_str)
                if json_pattern:
                    parsed = json.loads(json_pattern.group(0))
                    return {"tool_calls": [parsed]}
                raise

        except Exception as e:
            return {"error": str(e)}

    def execute_function(self, function_call_data: FunctionCallData) -> str:
        """Execute functions with enhanced error handling."""
        try:
            tool_calls = function_call_data.get("tool_calls", [])
            if not tool_calls:
                raise ValueError("No tool calls provided")

            results = []
            for tool_call in tool_calls:
                name = tool_call.get("name")
                arguments = tool_call.get("arguments", {})

                if not name or not isinstance(arguments, dict):
                    raise ValueError(f"Invalid tool call format: {tool_call}")

                # Execute the tool (implement actual logic here)
                results.append(f"Executed {name} with arguments {arguments}")

            return "; ".join(results)

        except Exception as e:
            raise ToolCallError(f"Failed to execute function: {str(e)}") from e

    def get_tools_description(self) -> str:
        """Get formatted tools description."""
        if not self.tools:
            return ""

        return "\n".join(
            f"- {fn.name}: {fn.description} (Parameters: {', '.join(f'{name}: {typ}' for name, typ in fn.parameters.items())})"
            for fn in self.tools
        )

    def update_chat_history(self, prompt: str, response: str) -> None:
        """Update chat history with a new prompt-response pair.

        Args:
            prompt: The user's prompt/question
            response: The assistant's response

        This method adds both the user's prompt and the assistant's response
        to the conversation history as separate messages.
        """
        # Add user's message (normalize role)
        self.add_message("user", prompt)

        # Add assistant's response (normalize role)
        self.add_message("assistant", response)
```
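For context, a minimal usage sketch of this module follows. It is illustrative only and not part of the package diff: the tool name, its parameters, and the model reply are hypothetical, and the import path simply follows the file location shown above.

```python
from webscout.conversation import Conversation, Fn

# Hypothetical tool definition: name, description, and parameter types are illustrative.
weather_tool = Fn(
    name="get_weather",
    description="Fetch current weather for a city",
    parameters={"city": "str"},
)

# History is trimmed to ~600 words and the tool protocol is injected into the intro.
chat = Conversation(max_tokens=600, tools=[weather_tool])

# Build the full prompt (intro + tool protocol + trimmed history) for an LLM call.
prompt = "What's the weather in Paris?"
full_prompt = chat.gen_complete_prompt(prompt)

# Suppose the model replied with a tool call in the expected <tool_call> format.
model_reply = '<tool_call>{"name": "get_weather", "arguments": {"city": "Paris"}}</tool_call>'
outcome = chat.handle_tool_response(model_reply)
print(outcome["is_tool_call"], outcome["result"])  # True, placeholder execution result

# For a plain conversational turn, record both sides of the exchange.
chat.update_chat_history(prompt, "It is sunny in Paris today.")
```

Note that `execute_function` in this release only returns a placeholder string ("Executed ... with arguments ..."); actual tool dispatch is left to the caller.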