webscout 8.2.2__py3-none-any.whl → 8.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +112 -22
- webscout/AIbase.py +144 -7
- webscout/AIutel.py +249 -131
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/__init__.py +0 -1
- webscout/cli.py +256 -0
- webscout/conversation.py +307 -436
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/METADATA +172 -52
- webscout-8.2.7.dist-info/RECORD +26 -0
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/entry_points.txt +3 -0
- webscout-8.2.7.dist-info/top_level.txt +1 -0
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Extra/GitToolkit/__init__.py +0 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
- webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
- webscout/Extra/GitToolkit/gitapi/user.py +0 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
- webscout/Extra/YTToolkit/YTdownloader.py +0 -957
- webscout/Extra/YTToolkit/__init__.py +0 -3
- webscout/Extra/YTToolkit/transcriber.py +0 -476
- webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
- webscout/Extra/YTToolkit/ytapi/https.py +0 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
- webscout/Extra/YTToolkit/ytapi/query.py +0 -40
- webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
- webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
- webscout/Extra/YTToolkit/ytapi/video.py +0 -232
- webscout/Extra/__init__.py +0 -7
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -849
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.py +0 -682
- webscout/Extra/tempmail/__init__.py +0 -28
- webscout/Extra/tempmail/async_utils.py +0 -141
- webscout/Extra/tempmail/base.py +0 -161
- webscout/Extra/tempmail/cli.py +0 -187
- webscout/Extra/tempmail/emailnator.py +0 -84
- webscout/Extra/tempmail/mail_tm.py +0 -361
- webscout/Extra/tempmail/temp_mail_io.py +0 -292
- webscout/Extra/weather.py +0 -194
- webscout/Extra/weather_ascii.py +0 -76
- webscout/LLM.py +0 -442
- webscout/Litlogger/__init__.py +0 -67
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -250
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/AISEARCH/Perplexity.py +0 -359
- webscout/Provider/AISEARCH/__init__.py +0 -10
- webscout/Provider/AISEARCH/felo_search.py +0 -228
- webscout/Provider/AISEARCH/genspark_search.py +0 -208
- webscout/Provider/AISEARCH/hika_search.py +0 -194
- webscout/Provider/AISEARCH/iask_search.py +0 -436
- webscout/Provider/AISEARCH/monica_search.py +0 -246
- webscout/Provider/AISEARCH/scira_search.py +0 -324
- webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
- webscout/Provider/Aitopia.py +0 -292
- webscout/Provider/AllenAI.py +0 -413
- webscout/Provider/Andi.py +0 -228
- webscout/Provider/Blackboxai.py +0 -229
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTClone.py +0 -226
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/Cloudflare.py +0 -273
- webscout/Provider/Cohere.py +0 -208
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Deepinfra.py +0 -297
- webscout/Provider/ElectronHub.py +0 -709
- webscout/Provider/ExaAI.py +0 -261
- webscout/Provider/ExaChat.py +0 -342
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -193
- webscout/Provider/Gemini.py +0 -169
- webscout/Provider/GithubChat.py +0 -367
- webscout/Provider/Glider.py +0 -211
- webscout/Provider/Groq.py +0 -670
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HeckAI.py +0 -233
- webscout/Provider/HuggingFaceChat.py +0 -462
- webscout/Provider/Hunyuan.py +0 -272
- webscout/Provider/Jadve.py +0 -266
- webscout/Provider/Koboldai.py +0 -381
- webscout/Provider/LambdaChat.py +0 -392
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Llama3.py +0 -204
- webscout/Provider/Marcus.py +0 -148
- webscout/Provider/Netwrck.py +0 -228
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/__init__.py +0 -25
- webscout/Provider/OPENAI/base.py +0 -46
- webscout/Provider/OPENAI/c4ai.py +0 -367
- webscout/Provider/OPENAI/chatgpt.py +0 -549
- webscout/Provider/OPENAI/chatgptclone.py +0 -460
- webscout/Provider/OPENAI/deepinfra.py +0 -272
- webscout/Provider/OPENAI/e2b.py +0 -1350
- webscout/Provider/OPENAI/exaai.py +0 -404
- webscout/Provider/OPENAI/exachat.py +0 -433
- webscout/Provider/OPENAI/freeaichat.py +0 -352
- webscout/Provider/OPENAI/glider.py +0 -316
- webscout/Provider/OPENAI/heckai.py +0 -337
- webscout/Provider/OPENAI/llmchatco.py +0 -327
- webscout/Provider/OPENAI/netwrck.py +0 -348
- webscout/Provider/OPENAI/opkfc.py +0 -488
- webscout/Provider/OPENAI/scirachat.py +0 -463
- webscout/Provider/OPENAI/sonus.py +0 -294
- webscout/Provider/OPENAI/standardinput.py +0 -425
- webscout/Provider/OPENAI/textpollinations.py +0 -285
- webscout/Provider/OPENAI/toolbaz.py +0 -405
- webscout/Provider/OPENAI/typegpt.py +0 -346
- webscout/Provider/OPENAI/uncovrAI.py +0 -455
- webscout/Provider/OPENAI/utils.py +0 -211
- webscout/Provider/OPENAI/venice.py +0 -413
- webscout/Provider/OPENAI/wisecat.py +0 -381
- webscout/Provider/OPENAI/writecream.py +0 -156
- webscout/Provider/OPENAI/x0gpt.py +0 -371
- webscout/Provider/OPENAI/yep.py +0 -327
- webscout/Provider/OpenGPT.py +0 -199
- webscout/Provider/Openai.py +0 -496
- webscout/Provider/PI.py +0 -344
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/PizzaGPT.py +0 -198
- webscout/Provider/QwenLM.py +0 -254
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -278
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/__init__.py +0 -12
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/TTS/__init__.py +0 -7
- webscout/Provider/TTS/deepgram.py +0 -156
- webscout/Provider/TTS/elevenlabs.py +0 -111
- webscout/Provider/TTS/gesserit.py +0 -127
- webscout/Provider/TTS/murfai.py +0 -113
- webscout/Provider/TTS/parler.py +0 -111
- webscout/Provider/TTS/speechma.py +0 -180
- webscout/Provider/TTS/streamElements.py +0 -333
- webscout/Provider/TTS/utils.py +0 -280
- webscout/Provider/TeachAnything.py +0 -187
- webscout/Provider/TextPollinationsAI.py +0 -231
- webscout/Provider/TwoAI.py +0 -199
- webscout/Provider/Venice.py +0 -219
- webscout/Provider/VercelAI.py +0 -234
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/WiseCat.py +0 -196
- webscout/Provider/Writecream.py +0 -211
- webscout/Provider/WritingMate.py +0 -197
- webscout/Provider/Youchat.py +0 -330
- webscout/Provider/__init__.py +0 -198
- webscout/Provider/ai4chat.py +0 -202
- webscout/Provider/aimathgpt.py +0 -189
- webscout/Provider/akashgpt.py +0 -342
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/asksteve.py +0 -203
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/cerebras.py +0 -242
- webscout/Provider/chatglm.py +0 -205
- webscout/Provider/cleeai.py +0 -213
- webscout/Provider/copilot.py +0 -428
- webscout/Provider/elmo.py +0 -234
- webscout/Provider/freeaichat.py +0 -271
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/geminiapi.py +0 -208
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/Provider/granite.py +0 -187
- webscout/Provider/hermes.py +0 -219
- webscout/Provider/julius.py +0 -223
- webscout/Provider/koala.py +0 -268
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/learnfastai.py +0 -266
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llama3mitril.py +0 -180
- webscout/Provider/llamatutor.py +0 -192
- webscout/Provider/llmchat.py +0 -213
- webscout/Provider/llmchatco.py +0 -311
- webscout/Provider/meta.py +0 -794
- webscout/Provider/multichat.py +0 -325
- webscout/Provider/promptrefine.py +0 -193
- webscout/Provider/scira_chat.py +0 -277
- webscout/Provider/scnet.py +0 -187
- webscout/Provider/searchchat.py +0 -293
- webscout/Provider/sonus.py +0 -208
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/toolbaz.py +0 -320
- webscout/Provider/turboseek.py +0 -219
- webscout/Provider/tutorai.py +0 -252
- webscout/Provider/typefully.py +0 -280
- webscout/Provider/typegpt.py +0 -232
- webscout/Provider/uncovr.py +0 -312
- webscout/Provider/x0gpt.py +0 -256
- webscout/Provider/yep.py +0 -376
- webscout/litagent/__init__.py +0 -29
- webscout/litagent/agent.py +0 -455
- webscout/litagent/constants.py +0 -60
- webscout/litprinter/__init__.py +0 -59
- webscout/scout/__init__.py +0 -8
- webscout/scout/core/__init__.py +0 -7
- webscout/scout/core/crawler.py +0 -140
- webscout/scout/core/scout.py +0 -568
- webscout/scout/core/search_result.py +0 -96
- webscout/scout/core/text_analyzer.py +0 -63
- webscout/scout/core/text_utils.py +0 -277
- webscout/scout/core/web_analyzer.py +0 -52
- webscout/scout/core.py +0 -881
- webscout/scout/element.py +0 -460
- webscout/scout/parsers/__init__.py +0 -69
- webscout/scout/parsers/html5lib_parser.py +0 -172
- webscout/scout/parsers/html_parser.py +0 -236
- webscout/scout/parsers/lxml_parser.py +0 -178
- webscout/scout/utils.py +0 -37
- webscout/swiftcli/__init__.py +0 -809
- webscout/zeroart/__init__.py +0 -55
- webscout/zeroart/base.py +0 -60
- webscout/zeroart/effects.py +0 -99
- webscout/zeroart/fonts.py +0 -816
- webscout-8.2.2.dist-info/RECORD +0 -309
- webscout-8.2.2.dist-info/entry_points.txt +0 -5
- webscout-8.2.2.dist-info/top_level.txt +0 -3
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info/licenses}/LICENSE.md +0 -0
webscout/conversation.py
CHANGED
@@ -2,59 +2,71 @@ import os
 import json
 import logging
 from typing import Optional, Dict, List, Any, TypedDict, Callable, TypeVar, Union
+from dataclasses import dataclass
+from datetime import datetime
 
 T = TypeVar('T')
 
+class ConversationError(Exception):
+    """Base exception for conversation-related errors."""
+    pass
+
+class ToolCallError(ConversationError):
+    """Raised when there's an error with tool calls."""
+    pass
+
+class MessageValidationError(ConversationError):
+    """Raised when message validation fails."""
+    pass
+
+@dataclass
+class Message:
+    """Represents a single message in the conversation."""
+    role: str
+    content: str
+    timestamp: datetime = datetime.now()
+    metadata: Dict[str, Any] = None
+
+    def __post_init__(self):
+        if self.metadata is None:
+            self.metadata = {}
 
 class FunctionCall(TypedDict):
     """Type for a function call."""
     name: str
     arguments: Dict[str, Any]
 
-
 class ToolDefinition(TypedDict):
     """Type for a tool definition."""
     type: str
     function: Dict[str, Any]
 
-
 class FunctionCallData(TypedDict, total=False):
     """Type for function call data"""
     tool_calls: List[FunctionCall]
     error: str
 
-
 class Fn:
-    """
-    Represents a function (tool) that the agent can call.
-    """
+    """Represents a function (tool) that the agent can call."""
     def __init__(self, name: str, description: str, parameters: Dict[str, str]) -> None:
         self.name: str = name
         self.description: str = description
         self.parameters: Dict[str, str] = parameters
 
-
 def tools(func: Callable[..., T]) -> Callable[..., T]:
-    """Decorator to mark a function as a tool
+    """Decorator to mark a function as a tool."""
     func._is_tool = True  # type: ignore
     return func
 
-
 class Conversation:
-    """
-
-    This class is responsible for managing chat conversations, including:
-    - Maintaining chat history
-    - Loading/saving conversations from/to files
-    - Generating prompts based on context
-    - Managing token limits and history pruning
-    - Supporting tool calling functionality
+    """Modern conversation manager with enhanced features.
 
-
-
-
-
-
+    Key Features:
+    - Robust message handling with metadata
+    - Enhanced tool calling support
+    - Efficient history management
+    - Improved error handling
+    - Memory optimization
     """
 
     intro = (
@@ -69,128 +81,136 @@ class Conversation:
         filepath: Optional[str] = None,
         update_file: bool = True,
         tools: Optional[List[Fn]] = None,
+        compression_threshold: int = 10000,
     ):
-        """Initialize
-
-        Args:
-            status (bool): Flag to control history tracking. Defaults to True.
-            max_tokens (int): Maximum tokens for completion response. Defaults to 600.
-            filepath (str, optional): Path to save/load conversation history. Defaults to None.
-            update_file (bool): Whether to append new messages to file. Defaults to True.
-            tools (List[Fn], optional): List of tools available for the conversation. Defaults to None.
-
-        Examples:
-            >>> chat = Conversation(max_tokens=500)
-            >>> chat = Conversation(filepath="chat_history.txt")
-        """
+        """Initialize conversation manager with modern features."""
         self.status = status
         self.max_tokens_to_sample = max_tokens
-        self.
-        self.history_format = "\nUser
-        self.tool_history_format = "\nUser
+        self.messages: List[Message] = []
+        self.history_format = "\nUser: %(user)s\nAssistant: %(llm)s"
+        self.tool_history_format = "\nUser: %(user)s\nAssistant: <tool_call>%(tool_json)s</tool_call>\nTool: %(result)s"
         self.file = filepath
         self.update_file = update_file
         self.history_offset = 10250
         self.prompt_allowance = 10
         self.tools = tools or []
+        self.compression_threshold = compression_threshold
+        self.logger = self._setup_logger()
 
         if filepath:
             self.load_conversation(filepath, False)
 
-    def
-        """
-
-
-
-
-
-
-
-
-
-        ), f"Filepath needs to be of str datatype not {type(filepath)}"
-        assert (
-            os.path.isfile(filepath) if exists else True
-        ), f"File '{filepath}' does not exist"
-
-        if not os.path.isfile(filepath):
-            with open(filepath, "w", encoding="utf-8") as fh:
-                fh.write(self.intro)
-        else:
-            with open(filepath, encoding="utf-8") as fh:
-                file_contents = fh.readlines()
-                if file_contents:
-                    self.intro = file_contents[0]  # First line is intro
-                    self.chat_history = "\n".join(file_contents[1:])
-
-    def __trim_chat_history(self, chat_history: str, intro: str) -> str:
-        """Keep the chat history fresh by trimming it when it gets too long!
+    def _setup_logger(self) -> logging.Logger:
+        """Set up enhanced logging."""
+        logger = logging.getLogger("conversation")
+        if not logger.handlers:
+            handler = logging.StreamHandler()
+            formatter = logging.Formatter(
+                '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+            )
+            handler.setFormatter(formatter)
+            logger.addHandler(handler)
+            logger.setLevel(logging.INFO)
+        return logger
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    def load_conversation(self, filepath: str, exists: bool = True) -> None:
+        """Load conversation with improved error handling."""
+        try:
+            if not isinstance(filepath, str):
+                raise TypeError(f"Filepath must be str, not {type(filepath)}")
+
+            if exists and not os.path.isfile(filepath):
+                raise FileNotFoundError(f"File '{filepath}' does not exist")
+
+            if not os.path.isfile(filepath):
+                with open(filepath, "w", encoding="utf-8") as fh:
+                    fh.write(self.intro)
+            else:
+                with open(filepath, encoding="utf-8") as fh:
+                    file_contents = fh.readlines()
+                    if file_contents:
+                        self.intro = file_contents[0]
+                        self._process_history_from_file(file_contents[1:])
+        except Exception as e:
+            self.logger.error(f"Error loading conversation: {str(e)}")
+            raise ConversationError(f"Failed to load conversation: {str(e)}") from e
+
+    def _process_history_from_file(self, lines: List[str]) -> None:
+        """Process and structure conversation history from file."""
+        current_role = None
+        current_content = []
+
+        for line in lines:
+            line = line.strip()
+            if line.startswith(("User:", "Assistant:", "Tool:")):
+                if current_role and current_content:
+                    self.messages.append(Message(
+                        role=current_role,
+                        content="\n".join(current_content)
+                    ))
+                    current_content = []
+                current_role = line.split(":")[0].lower()
+                content = ":".join(line.split(":")[1:]).strip()
+                current_content.append(content)
+            elif line:
+                current_content.append(line)
+
+        if current_role and current_content:
+            self.messages.append(Message(
+                role=current_role,
+                content="\n".join(current_content)
+            ))
+
+    def _compress_history(self) -> None:
+        """Compress history when it exceeds threshold."""
+        if len(self.messages) > self.compression_threshold:
+            # Keep recent messages and summarize older ones
+            keep_recent = 100  # Adjust based on needs
+            self.messages = (
+                [self._summarize_messages(self.messages[:-keep_recent])] +
+                self.messages[-keep_recent:]
+            )
+
+    def _summarize_messages(self, messages: List[Message]) -> Message:
+        """Create a summary message from older messages."""
+        return Message(
+            role="system",
+            content="[History Summary] Previous conversation summarized for context",
+            metadata={"summarized_count": len(messages)}
+        )
 
     def gen_complete_prompt(self, prompt: str, intro: Optional[str] = None) -> str:
-        """Generate
-
-        This method:
-        - Combines the intro, history, and new prompt
-        - Adds tools information if available
-        - Trims history if needed
-        - Keeps everything organized and flowing
-
-        Args:
-            prompt (str): Your message to add to the chat
-            intro (str, optional): Custom intro to use. Default: None (uses class intro)
-
-        Returns:
-            str: The complete conversation prompt, ready for the LLM!
-
-        Examples:
-            >>> chat = Conversation()
-            >>> prompt = chat.gen_complete_prompt("What's good?")
-        """
+        """Generate complete prompt with enhanced context management."""
         if not self.status:
             return prompt
 
-        intro = intro or self.intro
-            '''You are a helpful and versatile AI assistant. Your goal is to provide concise and informative responses directly to user queries. Use available tools in correct format to enhance responses or execute actions as needed.
-            ''')
+        intro = intro or self.intro
 
-        # Add tool information if
+        # Add tool information if available
        tools_description = self.get_tools_description()
        if tools_description:
            try:
-                from datetime import datetime
                date_str = f"Current date: {datetime.now().strftime('%d %b %Y')}"
            except:
                date_str = ""
 
-            intro = (
-
+            intro = self._generate_enhanced_intro(intro, tools_description, date_str)
+
+        # Generate history string with proper formatting
+        history = self._generate_history_string()
+
+        # Combine and trim if needed
+        complete_prompt = intro + self._trim_chat_history(
+            history + "\nUser: " + prompt + "\nAssistant:",
+            intro
+        )
+
+        return complete_prompt
+
+    def _generate_enhanced_intro(self, intro: str, tools_description: str, date_str: str) -> str:
+        """Generate enhanced introduction with tools and guidelines."""
+        return f'''
+{intro}
 
 {date_str}
 
@@ -199,367 +219,218 @@ class Conversation:
 Your goal is to assist the user effectively. Analyze each query and choose one of two response modes:
 
 **1. Tool Mode:**
-- **When:** If the query requires external data, calculations, or functions listed under AVAILABLE TOOLS
-- **Action:** Output *ONLY* the complete JSON tool call
-- **
-- **Example (Output is *only* this block):**
-```json
-<tool_call>
-{{
-  "name": "search",
-  "arguments": {{ "query": "latest population of Tokyo" }}
-}}
-</tool_call>
-```
+- **When:** If the query requires external data, calculations, or functions listed under AVAILABLE TOOLS.
+- **Action:** Output *ONLY* the complete JSON tool call within tags.
+- **Format:** Must start with `<tool_call>` and end with `</tool_call>`.
 
 **2. Conversational Mode:**
-- **When:**
-- **Action:** Respond directly
-- **Example:** *User:* "Explain photosynthesis." *Assistant:* "Photosynthesis is how plants use sunlight, water, and carbon dioxide to create their food (glucose) and release oxygen."
-
-**ABSOLUTE PROHIBITIONS:**
-- **NEVER Explain Tool Use:** Don't say you're using a tool, which one, or why.
-- **NEVER Describe JSON/Tags:** Do not mention `tool_call`, JSON structure, or parameters.
-- **NEVER Apologize for Tools:** No need to say sorry for lacking direct info.
-- **NEVER Mix Text and Tool Calls:** Tool calls must be standalone.
-
-**Be concise and relevant in all responses.**
+- **When:** For queries answerable with internal knowledge.
+- **Action:** Respond directly and concisely.
 
 **AVAILABLE TOOLS:**
 {tools_description}
 
-**TOOL FORMAT
+**TOOL FORMAT:**
 <tool_call>
 {{
   "name": "tool_name",
   "arguments": {{
     "param": "value"
-    /* Add other parameters as needed */
   }}
 }}
 </tool_call>
+'''
+
+    def _generate_history_string(self) -> str:
+        """Generate formatted history string from messages."""
+        history_parts = []
+        for msg in self.messages:
+            if msg.role == "system" and msg.metadata.get("summarized_count"):
+                history_parts.append(f"[Previous messages summarized: {msg.metadata['summarized_count']}]")
+            else:
+                role_display = msg.role.capitalize()
+                if "<tool_call>" in msg.content:
+                    history_parts.append(f"{role_display}: {msg.content}")
+                else:
+                    history_parts.append(f"{role_display}: {msg.content}")
+        return "\n".join(history_parts)
 
-
-
-
-3. Avoid *all* prohibited explanations/text.
-''')
-
-        incomplete_chat_history = self.chat_history + self.history_format % {
-            "user": prompt,
-            "llm": ""
-        }
-        complete_prompt = intro + self.__trim_chat_history(incomplete_chat_history, intro)
-        return complete_prompt
-
-    def update_chat_history(
-        self, prompt: str, response: str, force: bool = False
-    ) -> None:
-        """Keep the conversation flowing by updating the chat history!
-
-        This method:
-        - Adds new messages to the history
-        - Updates the file if needed
-        - Keeps everything organized
-
-        Args:
-            prompt (str): Your message to add
-            response (str): The LLM's response
-            force (bool): Force update even if history is off. Default: False
-
-        Examples:
-            >>> chat = Conversation()
-            >>> chat.update_chat_history("Hi!", "Hello there!")
-        """
-        if not self.status and not force:
-            return
-
-        new_history = self.history_format % {"user": prompt, "llm": response}
+    def _trim_chat_history(self, chat_history: str, intro: str) -> str:
+        """Trim chat history with improved token management."""
+        total_length = len(intro) + len(chat_history)
 
-        if
-
-
-
-
-
-
-
-
-
-        self.chat_history += new_history
-        # logger.info(f"Chat history updated with prompt: {prompt}")
-
-    def update_chat_history_with_tool(
-        self, prompt: str, tool_name: str, tool_result: str, force: bool = False
-    ) -> None:
-        """Update chat history with a tool call and its result.
-
-        This method:
-        - Adds tool call interaction to the history
-        - Updates the file if needed
-        - Maintains the conversation flow with tools
-
-        Args:
-            prompt (str): The user's message that triggered the tool call
-            tool_name (str): Name of the tool that was called
-            tool_result (str): Result returned by the tool
-            force (bool): Force update even if history is off. Default: False
-
-        Examples:
-            >>> chat = Conversation()
-            >>> chat.update_chat_history_with_tool("What's the weather?", "weather_tool", "It's sunny, 75°F")
-        """
-        if not self.status and not force:
-            return
-
-        new_history = self.tool_history_format % {
-            "user": prompt,
-            "tool": tool_name,
-            "result": tool_result
-        }
+        if total_length > self.history_offset:
+            truncate_at = (total_length - self.history_offset) + self.prompt_allowance
+            # Try to truncate at a message boundary
+            lines = chat_history[truncate_at:].split('\n')
+            for i, line in enumerate(lines):
+                if line.startswith(("User:", "Assistant:", "Tool:")):
+                    return "... " + "\n".join(lines[i:])
+            return "... " + chat_history[truncate_at:]
+        return chat_history
 
-
-
+    def add_message(self, role: str, content: str, metadata: Optional[Dict[str, Any]] = None) -> None:
+        """Add a message with enhanced validation and metadata support."""
+        try:
+            if not self.validate_message(role, content):
+                raise MessageValidationError("Invalid message role or content")
+
+            message = Message(role=role, content=content, metadata=metadata or {})
+            self.messages.append(message)
+
+            if self.file and self.update_file:
+                self._append_to_file(message)
+
+            self._compress_history()
+
+        except Exception as e:
+            self.logger.error(f"Error adding message: {str(e)}")
+            raise ConversationError(f"Failed to add message: {str(e)}") from e
+
+    def _append_to_file(self, message: Message) -> None:
+        """Append message to file with error handling."""
+        try:
             if not os.path.exists(self.file):
                 with open(self.file, "w", encoding="utf-8") as fh:
                     fh.write(self.intro + "\n")
 
-            # Append new history
             with open(self.file, "a", encoding="utf-8") as fh:
-
-
-
-
-
-
-
-        This method:
-        - Validates the message role
-        - Adds the message to history
-        - Updates file if needed
-
-        Args:
-            role (str): Who's sending? ('user', 'llm', 'tool', or 'reasoning')
-            content (str): What's the message?
-
-        Examples:
-            >>> chat = Conversation()
-            >>> chat.add_message("user", "Hey there!")
-            >>> chat.add_message("llm", "Hi! How can I help?")
-        """
-        if not self.validate_message(role, content):
-            raise ValueError("Invalid message role or content")
-
-        role_formats = {
-            "user": "User",
-            "llm": "LLM",
-            "tool": "Tool",
-            "reasoning": "Reasoning"
-        }
-
-        if role in role_formats:
-            self.chat_history += f"\n{role_formats[role]} : {content}"
-        else:
-            raise ValueError(f"Invalid role: {role}. Must be one of {list(role_formats.keys())}")
-
-        # # Enhanced logging for message addition
-        # logger.info(f"Added message from {role}: {content}")
-        # logging.info(f"Message added: {role}: {content}")
+                role_display = message.role.capitalize()
+                fh.write(f"\n{role_display}: {message.content}")
+
+        except Exception as e:
+            self.logger.error(f"Error writing to file: {str(e)}")
+            raise ConversationError(f"Failed to write to file: {str(e)}") from e
 
     def validate_message(self, role: str, content: str) -> bool:
-        """Validate
-        valid_roles = {
+        """Validate message with enhanced role checking."""
+        valid_roles = {'user', 'assistant', 'tool', 'system'}
         if role not in valid_roles:
-
+            self.logger.error(f"Invalid role: {role}")
             return False
-        if not content:
-
+        if not content or not isinstance(content, str):
+            self.logger.error("Invalid content")
            return False
        return True
 
-    def
-        """
+    def handle_tool_response(self, response: str) -> Dict[str, Any]:
+        """Process tool responses with enhanced error handling."""
+        try:
+            if "<tool_call>" in response:
+                function_call_data = self._parse_function_call(response)
+
+                if "error" in function_call_data:
+                    return {
+                        "is_tool_call": True,
+                        "success": False,
+                        "result": function_call_data["error"],
+                        "original_response": response
+                    }
 
-
-
+                result = self.execute_function(function_call_data)
+                self.add_message("tool", result)
 
-
-
-
+                return {
+                    "is_tool_call": True,
+                    "success": True,
+                    "result": result,
+                    "tool_calls": function_call_data.get("tool_calls", []),
+                    "original_response": response
+                }
+
+            return {
+                "is_tool_call": False,
+                "result": response,
+                "original_response": response
+            }
+
+        except Exception as e:
+            self.logger.error(f"Error handling tool response: {str(e)}")
+            raise ToolCallError(f"Failed to handle tool response: {str(e)}") from e
+
+    def _parse_function_call(self, response: str) -> FunctionCallData:
+        """Parse function calls with improved JSON handling."""
         try:
-            #
-            start_tag
-            end_tag
-            start_idx
-            end_idx
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            # Try to parse the JSON directly
-            try:
-                parsed_response: Any = json.loads(json_str)
-                if isinstance(parsed_response, dict):
-                    return {"tool_calls": [parsed_response]}
-                else:
-                    raise ValueError("Invalid JSON structure in tool call.")
-            except json.JSONDecodeError:
-                # If direct parsing failed, try to extract just the JSON object
-                import re
-                json_pattern = re.search(r'\{[\s\S]*\}', json_str)
-                if json_pattern:
-                    parsed_response = json.loads(json_pattern.group(0))
-                    return {"tool_calls": [parsed_response]}
-                raise
-            else:
-                # Extract JSON content - for the format with brackets
-                json_str: str = response[start_idx + len(start_tag):end_idx].strip()
-                parsed_response: Any = json.loads(json_str)
-
-                if isinstance(parsed_response, list):
-                    return {"tool_calls": parsed_response}
-                elif isinstance(parsed_response, dict):
-                    return {"tool_calls": [parsed_response]}
+            # Extract content between tool call tags
+            start_tag = "<tool_call>"
+            end_tag = "</tool_call>"
+            start_idx = response.find(start_tag)
+            end_idx = response.rfind(end_tag)
+
+            if start_idx == -1 or end_idx == -1:
+                raise ValueError("No valid tool call tags found")
+
+            json_str = response[start_idx + len(start_tag):end_idx].strip()
+
+            # Handle both single and multiple tool calls
+            try:
+                parsed = json.loads(json_str)
+                if isinstance(parsed, dict):
+                    return {"tool_calls": [parsed]}
+                elif isinstance(parsed, list):
+                    return {"tool_calls": parsed}
                 else:
-                    raise ValueError("
-
-
-
+                    raise ValueError("Invalid tool call structure")
+            except json.JSONDecodeError:
+                # Try to extract valid JSON if embedded in other content
+                import re
+                json_pattern = re.search(r'\{[\s\S]*\}', json_str)
+                if json_pattern:
+                    parsed = json.loads(json_pattern.group(0))
+                    return {"tool_calls": [parsed]}
+                raise
+
+        except Exception as e:
+            self.logger.error(f"Error parsing function call: {str(e)}")
             return {"error": str(e)}
 
     def execute_function(self, function_call_data: FunctionCallData) -> str:
-        """Execute
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                arguments: Dict[str, Any] = tool_call.get("arguments", {})
-
-                if not function_name or not isinstance(arguments, dict):
-                    results.append(f"Invalid tool call: {tool_call}")
-                    continue
-
-                # Here you would implement the actual execution logic for each tool
-                # For demonstration, we'll return a placeholder response
-                results.append(f"Executed {function_name} with arguments {arguments}")
-
-        return "; ".join(results)
-
-    def _convert_fns_to_tools(self, fns: Optional[List[Fn]]) -> List[ToolDefinition]:
-        """Convert functions to tool definitions for the LLM.
-
-        Args:
-            fns (Optional[List[Fn]]): List of function definitions
+        """Execute functions with enhanced error handling."""
+        try:
+            tool_calls = function_call_data.get("tool_calls", [])
+            if not tool_calls:
+                raise ValueError("No tool calls provided")
+
+            results = []
+            for tool_call in tool_calls:
+                name = tool_call.get("name")
+                arguments = tool_call.get("arguments", {})
+
+                if not name or not isinstance(arguments, dict):
+                    raise ValueError(f"Invalid tool call format: {tool_call}")
+
+                # Execute the tool (implement actual logic here)
+                results.append(f"Executed {name} with arguments {arguments}")
 
-
-
-
-
-
-
-        tools: List[ToolDefinition] = []
-        for fn in fns:
-            tool: ToolDefinition = {
-                "type": "function",
-                "function": {
-                    "name": fn.name,
-                    "description": fn.description,
-                    "parameters": {
-                        "type": "object",
-                        "properties": {
-                            param_name: {
-                                "type": param_type,
-                                "description": f"The {param_name} parameter"
-                            } for param_name, param_type in fn.parameters.items()
-                        },
-                        "required": list(fn.parameters.keys())
-                    }
-                }
-            }
-            tools.append(tool)
-        return tools
+            return "; ".join(results)
+
+        except Exception as e:
+            self.logger.error(f"Error executing function: {str(e)}")
+            raise ToolCallError(f"Failed to execute function: {str(e)}") from e
 
     def get_tools_description(self) -> str:
-        """Get
-
-        Returns:
-            str: Formatted tools description
-        """
+        """Get formatted tools description."""
         if not self.tools:
             return ""
 
-
-
-
-
-
-        return "\n".join(tools_desc)
-
-    def handle_tool_response(self, response: str) -> Dict[str, Any]:
-        """Process a response that might contain a tool call.
-
-        This method:
-        - Checks if the response contains a tool call
-        - Parses and executes the tool call if present
-        - Returns the appropriate result
+        return "\n".join(
+            f"- {fn.name}: {fn.description} (Parameters: {', '.join(f'{name}: {typ}' for name, typ in fn.parameters.items())})"
+            for fn in self.tools
+        )
 
+    def update_chat_history(self, prompt: str, response: str) -> None:
+        """Update chat history with a new prompt-response pair.
+
         Args:
-
-
-
-
+            prompt: The user's prompt/question
+            response: The assistant's response
+
+        This method adds both the user's prompt and the assistant's response
+        to the conversation history as separate messages.
         """
-        #
-
-
-
-
-        return {
-            "is_tool_call": True,
-            "success": False,
-            "result": function_call_data["error"],
-            "original_response": response
-        }
-
-        # Execute the function call
-        result = self.execute_function(function_call_data)
-
-        # Add the result to chat history as a tool message
-        self.add_message("tool", result)
-
-        return {
-            "is_tool_call": True,
-            "success": True,
-            "result": result,
-            "tool_calls": function_call_data.get("tool_calls", []),
-            "original_response": response
-        }
-
-        return {
-            "is_tool_call": False,
-            "result": response,
-            "original_response": response
-        }
-
+        # Add user's message
+        self.add_message("user", prompt)
+
+        # Add assistant's response
+        self.add_message("assistant", response)
 
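Taken together, the hunks above replace the old string-based chat_history with a list of Message dataclass objects, rename the assistant role from "llm" to "assistant", and route tool calls through handle_tool_response and execute_function. The snippet below is a minimal usage sketch inferred from the new code shown in this diff, not taken from webscout's documentation: Conversation, Fn, gen_complete_prompt, handle_tool_response, and update_chat_history all appear in the hunks above, while the weather_lookup tool and the canned model response are hypothetical stand-ins.

from webscout.conversation import Conversation, Fn

# Define a tool the model may call; Fn takes a name, a description, and a
# mapping of parameter names to type names (see the Fn class in the diff above).
weather_tool = Fn(
    name="weather_lookup",  # hypothetical tool, for illustration only
    description="Look up the current weather for a city",
    parameters={"city": "string"},
)

chat = Conversation(filepath="chat_history.txt", tools=[weather_tool])

# Build the full prompt: intro + tool descriptions + trimmed history + the new turn.
prompt = chat.gen_complete_prompt("What's the weather in Tokyo?")

# Stand-in for a real LLM call; a tool-capable model could instead reply with a
# <tool_call>{"name": ..., "arguments": {...}}</tool_call> block.
llm_response = "It is sunny in Tokyo today."

# Route the raw model output: plain text passes through unchanged, while a
# <tool_call> block is parsed, executed, and recorded as a "tool" message.
outcome = chat.handle_tool_response(llm_response)

if not outcome["is_tool_call"]:
    # Store the exchange as two Message objects with roles "user" and "assistant".
    chat.update_chat_history("What's the weather in Tokyo?", outcome["result"])

Note that execute_function in this version only returns a placeholder string ("Executed ... with arguments ..."), so real tool execution still has to be wired in by the caller before tool results are meaningful.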