webscout 7.8__py3-none-any.whl → 8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/Bard.py +5 -25
- webscout/DWEBS.py +476 -476
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -103
- webscout/Extra/__init__.py +2 -0
- webscout/Extra/autocoder/__init__.py +1 -1
- webscout/Extra/autocoder/{rawdog.py → autocoder.py} +849 -849
- webscout/Extra/tempmail/__init__.py +26 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +156 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Provider/AISEARCH/__init__.py +5 -1
- webscout/Provider/AISEARCH/hika_search.py +194 -0
- webscout/Provider/AISEARCH/monica_search.py +246 -0
- webscout/Provider/AISEARCH/scira_search.py +320 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
- webscout/Provider/AllenAI.py +255 -122
- webscout/Provider/DeepSeek.py +1 -2
- webscout/Provider/Deepinfra.py +296 -286
- webscout/Provider/ElectronHub.py +709 -716
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +28 -6
- webscout/Provider/Gemini.py +167 -165
- webscout/Provider/GithubChat.py +2 -1
- webscout/Provider/Groq.py +38 -24
- webscout/Provider/LambdaChat.py +2 -1
- webscout/Provider/Netwrck.py +3 -2
- webscout/Provider/OpenGPT.py +199 -0
- webscout/Provider/PI.py +39 -24
- webscout/Provider/TextPollinationsAI.py +232 -230
- webscout/Provider/Youchat.py +326 -296
- webscout/Provider/__init__.py +10 -4
- webscout/Provider/ai4chat.py +58 -56
- webscout/Provider/akashgpt.py +34 -22
- webscout/Provider/copilot.py +427 -427
- webscout/Provider/freeaichat.py +9 -2
- webscout/Provider/labyrinth.py +121 -20
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/scira_chat.py +271 -0
- webscout/Provider/typefully.py +280 -0
- webscout/Provider/uncovr.py +312 -299
- webscout/Provider/yep.py +64 -12
- webscout/__init__.py +38 -36
- webscout/cli.py +293 -293
- webscout/conversation.py +350 -17
- webscout/litprinter/__init__.py +59 -667
- webscout/optimizers.py +419 -419
- webscout/update_checker.py +14 -12
- webscout/version.py +1 -1
- webscout/webscout_search.py +1346 -1282
- webscout/webscout_search_async.py +877 -813
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/METADATA +44 -39
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/RECORD +63 -46
- webscout/Provider/DARKAI.py +0 -225
- webscout/Provider/EDITEE.py +0 -192
- webscout/litprinter/colors.py +0 -54
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/LICENSE.md +0 -0
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/WHEEL +0 -0
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/entry_points.txt +0 -0
- {webscout-7.8.dist-info → webscout-8.0.dist-info}/top_level.txt +0 -0
webscout/conversation.py
CHANGED
|
@@ -1,5 +1,43 @@
|
|
|
1
1
|
import os
|
|
2
|
-
|
|
2
|
+
import json
|
|
3
|
+
import logging
|
|
4
|
+
from typing import Optional, Dict, List, Any, TypedDict, Callable, TypeVar, Union
|
|
5
|
+
|
|
6
|
+
T = TypeVar('T')
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
# Functional TypedDict form: the shape of one parsed tool invocation.
FunctionCall = TypedDict("FunctionCall", {"name": str, "arguments": Dict[str, Any]})
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# Functional TypedDict form: an OpenAI-style tool definition wrapper.
ToolDefinition = TypedDict("ToolDefinition", {"type": str, "function": Dict[str, Any]})
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
# Functional TypedDict form: parse result -- either tool calls or an error.
# total=False keeps both keys optional, matching the class-based original.
FunctionCallData = TypedDict(
    "FunctionCallData",
    {"tool_calls": List[FunctionCall], "error": str},
    total=False,
)
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class Fn:
    """
    Represents a function (tool) that the agent can call.

    Attributes:
        name: Tool identifier used in ``<tool_call>`` payloads.
        description: Human-readable summary exposed to the LLM.
        parameters: Mapping of parameter name to type name (e.g. ``{"q": "string"}``).
    """
    def __init__(self, name: str, description: str, parameters: Dict[str, str]) -> None:
        self.name: str = name
        self.description: str = description
        self.parameters: Dict[str, str] = parameters

    def __repr__(self) -> str:
        # Helps debugging when registered tools are logged or inspected.
        return f"{type(self).__name__}(name={self.name!r}, parameters={self.parameters!r})"
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def tools(func: Callable[..., T]) -> Callable[..., T]:
    """Tag *func* so the framework recognises it as a callable tool."""
    setattr(func, "_is_tool", True)  # marker inspected by tool discovery
    return func
|
|
3
41
|
|
|
4
42
|
|
|
5
43
|
class Conversation:
|
|
@@ -10,6 +48,7 @@ class Conversation:
|
|
|
10
48
|
- Loading/saving conversations from/to files
|
|
11
49
|
- Generating prompts based on context
|
|
12
50
|
- Managing token limits and history pruning
|
|
51
|
+
- Supporting tool calling functionality
|
|
13
52
|
|
|
14
53
|
Examples:
|
|
15
54
|
>>> chat = Conversation(max_tokens=500)
|
|
@@ -19,8 +58,8 @@ class Conversation:
|
|
|
19
58
|
"""
|
|
20
59
|
|
|
21
60
|
intro = (
|
|
22
|
-
"You're a Large Language Model
|
|
23
|
-
"
|
|
61
|
+
"You're a helpful Large Language Model assistant. "
|
|
62
|
+
"Respond directly to the user's questions or use tools when appropriate."
|
|
24
63
|
)
|
|
25
64
|
|
|
26
65
|
def __init__(
|
|
@@ -29,6 +68,7 @@ class Conversation:
|
|
|
29
68
|
max_tokens: int = 600,
|
|
30
69
|
filepath: Optional[str] = None,
|
|
31
70
|
update_file: bool = True,
|
|
71
|
+
tools: Optional[List[Fn]] = None,
|
|
32
72
|
):
|
|
33
73
|
"""Initialize a new Conversation manager.
|
|
34
74
|
|
|
@@ -37,6 +77,7 @@ class Conversation:
|
|
|
37
77
|
max_tokens (int): Maximum tokens for completion response. Defaults to 600.
|
|
38
78
|
filepath (str, optional): Path to save/load conversation history. Defaults to None.
|
|
39
79
|
update_file (bool): Whether to append new messages to file. Defaults to True.
|
|
80
|
+
tools (List[Fn], optional): List of tools available for the conversation. Defaults to None.
|
|
40
81
|
|
|
41
82
|
Examples:
|
|
42
83
|
>>> chat = Conversation(max_tokens=500)
|
|
@@ -46,10 +87,12 @@ class Conversation:
|
|
|
46
87
|
self.max_tokens_to_sample = max_tokens
|
|
47
88
|
self.chat_history = "" # Initialize as empty string
|
|
48
89
|
self.history_format = "\nUser : %(user)s\nLLM :%(llm)s"
|
|
90
|
+
self.tool_history_format = "\nUser : %(user)s\nLLM : [Tool Call: %(tool)s]\nTool : %(result)s"
|
|
49
91
|
self.file = filepath
|
|
50
92
|
self.update_file = update_file
|
|
51
93
|
self.history_offset = 10250
|
|
52
94
|
self.prompt_allowance = 10
|
|
95
|
+
self.tools = tools or []
|
|
53
96
|
|
|
54
97
|
if filepath:
|
|
55
98
|
self.load_conversation(filepath, False)
|
|
@@ -115,6 +158,7 @@ class Conversation:
|
|
|
115
158
|
|
|
116
159
|
This method:
|
|
117
160
|
- Combines the intro, history, and new prompt
|
|
161
|
+
- Adds tools information if available
|
|
118
162
|
- Trims history if needed
|
|
119
163
|
- Keeps everything organized and flowing
|
|
120
164
|
|
|
@@ -133,16 +177,79 @@ class Conversation:
|
|
|
133
177
|
return prompt
|
|
134
178
|
|
|
135
179
|
intro = intro or self.intro or (
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
180
|
+
'''You are a helpful and versatile AI assistant. Your goal is to provide concise and informative responses directly to user queries. Use available tools in correct format to enhance responses or execute actions as needed.
|
|
181
|
+
''')
|
|
182
|
+
|
|
183
|
+
# Add tool information if tools are available
|
|
184
|
+
tools_description = self.get_tools_description()
|
|
185
|
+
if tools_description:
|
|
186
|
+
try:
|
|
187
|
+
from datetime import datetime
|
|
188
|
+
date_str = f"Current date: {datetime.now().strftime('%d %b %Y')}"
|
|
189
|
+
except:
|
|
190
|
+
date_str = ""
|
|
191
|
+
|
|
192
|
+
intro = (f'''
|
|
193
|
+
{intro}
|
|
194
|
+
|
|
195
|
+
{date_str}
|
|
196
|
+
|
|
197
|
+
**CORE PROTOCOL:**
|
|
198
|
+
|
|
199
|
+
Your goal is to assist the user effectively. Analyze each query and choose one of two response modes:
|
|
200
|
+
|
|
201
|
+
**1. Tool Mode:**
|
|
202
|
+
- **When:** If the query requires external data, calculations, or functions listed under AVAILABLE TOOLS (e.g., web search, current info).
|
|
203
|
+
- **Action:** Output *ONLY* the complete JSON tool call, exactly matching the TOOL FORMAT below, enclosed in `<tool_call>` and `</tool_call>` tags.
|
|
204
|
+
- **CRITICAL:** Absolutely NO text, whitespace, or characters before `<tool_call>` or after `</tool_call>`. The output *must* start with `<tool_call>` and end with `</tool_call>`.
|
|
205
|
+
- **Example (Output is *only* this block):**
|
|
206
|
+
```json
|
|
207
|
+
<tool_call>
|
|
208
|
+
{{
|
|
209
|
+
"name": "search",
|
|
210
|
+
"arguments": {{ "query": "latest population of Tokyo" }}
|
|
211
|
+
}}
|
|
212
|
+
</tool_call>
|
|
213
|
+
```
|
|
214
|
+
|
|
215
|
+
**2. Conversational Mode:**
|
|
216
|
+
- **When:** If the query can be answered using your internal knowledge, is creative, or conversational.
|
|
217
|
+
- **Action:** Respond directly, clearly, and concisely.
|
|
218
|
+
- **Example:** *User:* "Explain photosynthesis." *Assistant:* "Photosynthesis is how plants use sunlight, water, and carbon dioxide to create their food (glucose) and release oxygen."
|
|
219
|
+
|
|
220
|
+
**ABSOLUTE PROHIBITIONS:**
|
|
221
|
+
- **NEVER Explain Tool Use:** Don't say you're using a tool, which one, or why.
|
|
222
|
+
- **NEVER Describe JSON/Tags:** Do not mention `tool_call`, JSON structure, or parameters.
|
|
223
|
+
- **NEVER Apologize for Tools:** No need to say sorry for lacking direct info.
|
|
224
|
+
- **NEVER Mix Text and Tool Calls:** Tool calls must be standalone.
|
|
225
|
+
|
|
226
|
+
**Be concise and relevant in all responses.**
|
|
227
|
+
|
|
228
|
+
**AVAILABLE TOOLS:**
|
|
229
|
+
{tools_description}
|
|
230
|
+
|
|
231
|
+
**TOOL FORMAT (Use Exactly):**
|
|
232
|
+
<tool_call>
|
|
233
|
+
{{
|
|
234
|
+
"name": "tool_name",
|
|
235
|
+
"arguments": {{
|
|
236
|
+
"param": "value"
|
|
237
|
+
/* Add other parameters as needed */
|
|
238
|
+
}}
|
|
239
|
+
}}
|
|
240
|
+
</tool_call>
|
|
241
|
+
|
|
242
|
+
**Summary Check:**
|
|
243
|
+
1. Tool needed? -> Output *only* the JSON in tags.
|
|
244
|
+
2. No tool needed? -> Respond directly and conversationally.
|
|
245
|
+
3. Avoid *all* prohibited explanations/text.
|
|
246
|
+
''')
|
|
139
247
|
|
|
140
248
|
incomplete_chat_history = self.chat_history + self.history_format % {
|
|
141
249
|
"user": prompt,
|
|
142
250
|
"llm": ""
|
|
143
251
|
}
|
|
144
252
|
complete_prompt = intro + self.__trim_chat_history(incomplete_chat_history, intro)
|
|
145
|
-
# logger.info(f"Generated prompt: {complete_prompt}")
|
|
146
253
|
return complete_prompt
|
|
147
254
|
|
|
148
255
|
def update_chat_history(
|
|
@@ -182,6 +289,47 @@ class Conversation:
|
|
|
182
289
|
self.chat_history += new_history
|
|
183
290
|
# logger.info(f"Chat history updated with prompt: {prompt}")
|
|
184
291
|
|
|
292
|
+
def update_chat_history_with_tool(
    self, prompt: str, tool_name: str, tool_result: str, force: bool = False
) -> None:
    """Record a tool-call interaction in the conversation history.

    Appends a formatted "user -> tool call -> tool result" entry to the
    in-memory history and, when file persistence is enabled, to the
    transcript file as well (creating it with the intro line if missing).

    Args:
        prompt (str): The user's message that triggered the tool call.
        tool_name (str): Name of the tool that was called.
        tool_result (str): Result returned by the tool.
        force (bool): Force update even if history is off. Default: False.
    """
    # History disabled and not forced -> nothing to do.
    if not (self.status or force):
        return

    entry = self.tool_history_format % {
        "user": prompt,
        "tool": tool_name,
        "result": tool_result,
    }

    if self.file and self.update_file:
        if not os.path.exists(self.file):
            # Seed a brand-new transcript file with the intro line first.
            with open(self.file, "w", encoding="utf-8") as fh:
                fh.write(self.intro + "\n")
        with open(self.file, "a", encoding="utf-8") as fh:
            fh.write(entry)

    self.chat_history += entry
|
|
332
|
+
|
|
185
333
|
def add_message(self, role: str, content: str) -> None:
|
|
186
334
|
"""Add a new message to the chat - simple and clean!
|
|
187
335
|
|
|
@@ -218,15 +366,200 @@ class Conversation:
|
|
|
218
366
|
# logger.info(f"Added message from {role}: {content}")
|
|
219
367
|
# logging.info(f"Message added: {role}: {content}")
|
|
220
368
|
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
369
|
+
def validate_message(self, role: str, content: str) -> bool:
    """Return True when *role* is a recognised speaker and *content* is non-empty."""
    recognised = ('user', 'llm', 'tool', 'reasoning', 'function_call')
    if role not in recognised:
        logging.error(f"Invalid role: {role}")
        return False
    if not content:
        logging.error("Content cannot be empty.")
        return False
    return True
|
|
379
|
+
|
|
380
|
+
def _parse_function_call(self, response: str) -> FunctionCallData:
|
|
381
|
+
"""Parse a function call from the LLM's response.
|
|
382
|
+
|
|
383
|
+
Args:
|
|
384
|
+
response (str): The LLM's response containing a function call
|
|
385
|
+
|
|
386
|
+
Returns:
|
|
387
|
+
FunctionCallData: Parsed function call data or error
|
|
388
|
+
"""
|
|
389
|
+
try:
|
|
390
|
+
# First try the standard format with square brackets: <tool_call>[...]</tool_call>
|
|
391
|
+
start_tag: str = "<tool_call>["
|
|
392
|
+
end_tag: str = "]</tool_call>"
|
|
393
|
+
start_idx: int = response.find(start_tag)
|
|
394
|
+
end_idx: int = response.rfind(end_tag)
|
|
395
|
+
|
|
396
|
+
# If not found, try the alternate format: <tool_call>\n{...}\n</tool_call>
|
|
397
|
+
if start_idx == -1 or end_idx == -1 or end_idx <= start_idx:
|
|
398
|
+
start_tag = "<tool_call>"
|
|
399
|
+
end_tag = "</tool_call>"
|
|
400
|
+
start_idx = response.find(start_tag)
|
|
401
|
+
end_idx = response.rfind(end_tag)
|
|
402
|
+
|
|
403
|
+
if start_idx == -1 or end_idx == -1 or end_idx <= start_idx:
|
|
404
|
+
raise ValueError("No valid <tool_call> JSON structure found in the response.")
|
|
405
|
+
|
|
406
|
+
# Extract JSON content - for the format without brackets
|
|
407
|
+
json_str: str = response[start_idx + len(start_tag):end_idx].strip()
|
|
408
|
+
|
|
409
|
+
# Try to parse the JSON directly
|
|
410
|
+
try:
|
|
411
|
+
parsed_response: Any = json.loads(json_str)
|
|
412
|
+
if isinstance(parsed_response, dict):
|
|
413
|
+
return {"tool_calls": [parsed_response]}
|
|
414
|
+
else:
|
|
415
|
+
raise ValueError("Invalid JSON structure in tool call.")
|
|
416
|
+
except json.JSONDecodeError:
|
|
417
|
+
# If direct parsing failed, try to extract just the JSON object
|
|
418
|
+
import re
|
|
419
|
+
json_pattern = re.search(r'\{[\s\S]*\}', json_str)
|
|
420
|
+
if json_pattern:
|
|
421
|
+
parsed_response = json.loads(json_pattern.group(0))
|
|
422
|
+
return {"tool_calls": [parsed_response]}
|
|
423
|
+
raise
|
|
424
|
+
else:
|
|
425
|
+
# Extract JSON content - for the format with brackets
|
|
426
|
+
json_str: str = response[start_idx + len(start_tag):end_idx].strip()
|
|
427
|
+
parsed_response: Any = json.loads(json_str)
|
|
428
|
+
|
|
429
|
+
if isinstance(parsed_response, list):
|
|
430
|
+
return {"tool_calls": parsed_response}
|
|
431
|
+
elif isinstance(parsed_response, dict):
|
|
432
|
+
return {"tool_calls": [parsed_response]}
|
|
433
|
+
else:
|
|
434
|
+
raise ValueError("<tool_call> should contain a list or a dictionary of tool calls.")
|
|
435
|
+
|
|
436
|
+
except (ValueError, json.JSONDecodeError) as e:
|
|
437
|
+
logging.error(f"Error parsing function call: %s", e)
|
|
438
|
+
return {"error": str(e)}
|
|
439
|
+
|
|
440
|
+
def execute_function(self, function_call_data: "FunctionCallData") -> str:
    """Execute a function call and return the result.

    Args:
        function_call_data (FunctionCallData): The function call data,
            expected to carry a ``tool_calls`` list of ``{"name", "arguments"}``
            dictionaries.

    Returns:
        str: Results of all calls joined with "; ", or an error message
        when the payload is malformed.
    """
    tool_calls: Optional[List[FunctionCall]] = function_call_data.get("tool_calls")

    if not tool_calls or not isinstance(tool_calls, list):
        return "Invalid tool_calls format."

    results: List[str] = []
    for tool_call in tool_calls:
        # Fixed annotation: .get may return None, so Optional[str], not str.
        function_name: Optional[str] = tool_call.get("name")
        arguments: Dict[str, Any] = tool_call.get("arguments", {})

        if not function_name or not isinstance(arguments, dict):
            results.append(f"Invalid tool call: {tool_call}")
            continue

        # Here you would implement the actual execution logic for each tool
        # For demonstration, we'll return a placeholder response
        results.append(f"Executed {function_name} with arguments {arguments}")

    return "; ".join(results)
|
|
468
|
+
|
|
469
|
+
def _convert_fns_to_tools(self, fns: Optional[List[Fn]]) -> List[ToolDefinition]:
    """Translate ``Fn`` descriptors into OpenAI-style tool definitions.

    Args:
        fns (Optional[List[Fn]]): Function descriptors to convert; may be
            None or empty.

    Returns:
        List[ToolDefinition]: One ``{"type": "function", "function": {...}}``
        entry per descriptor, with every parameter marked required.
    """
    if not fns:
        return []

    converted: List[ToolDefinition] = []
    for fn in fns:
        # Build the JSON-schema "properties" map one parameter at a time.
        properties: Dict[str, Any] = {}
        for param_name, param_type in fn.parameters.items():
            properties[param_name] = {
                "type": param_type,
                "description": f"The {param_name} parameter",
            }
        converted.append({
            "type": "function",
            "function": {
                "name": fn.name,
                "description": fn.description,
                "parameters": {
                    "type": "object",
                    "properties": properties,
                    "required": list(fn.parameters.keys()),
                },
            },
        })
    return converted
|
|
502
|
+
|
|
503
|
+
def get_tools_description(self) -> str:
    """Render the registered tools as bullet lines for the intro prompt.

    Returns:
        str: One "- name: description (Parameters: ...)" line per tool,
        or "" when no tools are registered.
    """
    if not self.tools:
        return ""

    return "\n".join(
        f"- {fn.name}: {fn.description} (Parameters: "
        + ", ".join(f"{name}: {typ}" for name, typ in fn.parameters.items())
        + ")"
        for fn in self.tools
    )
|
|
518
|
+
|
|
519
|
+
def handle_tool_response(self, response: str) -> Dict[str, Any]:
    """Process a response that might contain a tool call.

    Detects a ``<tool_call>`` block, parses and executes it, and records
    the tool output in the conversation; plain responses pass through.

    Args:
        response (str): The LLM's response.

    Returns:
        Dict[str, Any]: Result containing 'is_tool_call', 'result', and
        'original_response' (plus 'success' and 'tool_calls' for tool calls).
    """
    # No tool-call marker -> plain conversational answer, return untouched.
    if "<tool_call>" not in response:
        return {
            "is_tool_call": False,
            "result": response,
            "original_response": response
        }

    parsed = self._parse_function_call(response)
    if "error" in parsed:
        return {
            "is_tool_call": True,
            "success": False,
            "result": parsed["error"],
            "original_response": response
        }

    outcome = self.execute_function(parsed)
    # Record the tool output so later prompts can see it.
    self.add_message("tool", outcome)

    return {
        "is_tool_call": True,
        "success": True,
        "result": outcome,
        "tool_calls": parsed.get("tool_calls", []),
        "original_response": response
    }
|
|
231
564
|
|
|
232
565
|
|