webscout 8.2.4__py3-none-any.whl → 8.2.6__py3-none-any.whl

This diff shows the changes between publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (110)
  1. webscout/AIauto.py +112 -22
  2. webscout/AIutel.py +240 -344
  3. webscout/Extra/autocoder/autocoder.py +66 -5
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Provider/AISEARCH/scira_search.py +3 -5
  6. webscout/Provider/Aitopia.py +75 -51
  7. webscout/Provider/AllenAI.py +64 -67
  8. webscout/Provider/ChatGPTClone.py +33 -34
  9. webscout/Provider/ChatSandbox.py +342 -0
  10. webscout/Provider/Cloudflare.py +79 -32
  11. webscout/Provider/Deepinfra.py +69 -56
  12. webscout/Provider/ElectronHub.py +48 -39
  13. webscout/Provider/ExaChat.py +36 -20
  14. webscout/Provider/GPTWeb.py +24 -18
  15. webscout/Provider/GithubChat.py +52 -49
  16. webscout/Provider/GizAI.py +285 -0
  17. webscout/Provider/Glider.py +39 -28
  18. webscout/Provider/Groq.py +48 -20
  19. webscout/Provider/HeckAI.py +18 -36
  20. webscout/Provider/Jadve.py +30 -37
  21. webscout/Provider/LambdaChat.py +36 -59
  22. webscout/Provider/MCPCore.py +18 -21
  23. webscout/Provider/Marcus.py +23 -14
  24. webscout/Provider/Nemotron.py +218 -0
  25. webscout/Provider/Netwrck.py +35 -26
  26. webscout/Provider/OPENAI/__init__.py +1 -1
  27. webscout/Provider/OPENAI/exachat.py +4 -0
  28. webscout/Provider/OPENAI/scirachat.py +3 -4
  29. webscout/Provider/OPENAI/textpollinations.py +20 -22
  30. webscout/Provider/OPENAI/toolbaz.py +1 -0
  31. webscout/Provider/PI.py +22 -13
  32. webscout/Provider/StandardInput.py +42 -30
  33. webscout/Provider/TeachAnything.py +24 -12
  34. webscout/Provider/TextPollinationsAI.py +78 -76
  35. webscout/Provider/TwoAI.py +120 -88
  36. webscout/Provider/TypliAI.py +305 -0
  37. webscout/Provider/Venice.py +24 -22
  38. webscout/Provider/VercelAI.py +31 -12
  39. webscout/Provider/WiseCat.py +1 -1
  40. webscout/Provider/WrDoChat.py +370 -0
  41. webscout/Provider/__init__.py +11 -13
  42. webscout/Provider/ai4chat.py +5 -3
  43. webscout/Provider/akashgpt.py +59 -66
  44. webscout/Provider/asksteve.py +53 -44
  45. webscout/Provider/cerebras.py +77 -31
  46. webscout/Provider/chatglm.py +47 -37
  47. webscout/Provider/elmo.py +38 -32
  48. webscout/Provider/freeaichat.py +57 -43
  49. webscout/Provider/granite.py +24 -21
  50. webscout/Provider/hermes.py +27 -20
  51. webscout/Provider/learnfastai.py +25 -20
  52. webscout/Provider/llmchatco.py +48 -78
  53. webscout/Provider/multichat.py +13 -3
  54. webscout/Provider/scira_chat.py +50 -30
  55. webscout/Provider/scnet.py +27 -21
  56. webscout/Provider/searchchat.py +16 -24
  57. webscout/Provider/sonus.py +37 -39
  58. webscout/Provider/toolbaz.py +24 -46
  59. webscout/Provider/turboseek.py +37 -41
  60. webscout/Provider/typefully.py +30 -22
  61. webscout/Provider/typegpt.py +47 -51
  62. webscout/Provider/uncovr.py +46 -40
  63. webscout/__init__.py +0 -1
  64. webscout/cli.py +256 -0
  65. webscout/conversation.py +305 -448
  66. webscout/exceptions.py +3 -0
  67. webscout/swiftcli/__init__.py +80 -794
  68. webscout/swiftcli/core/__init__.py +7 -0
  69. webscout/swiftcli/core/cli.py +297 -0
  70. webscout/swiftcli/core/context.py +104 -0
  71. webscout/swiftcli/core/group.py +241 -0
  72. webscout/swiftcli/decorators/__init__.py +28 -0
  73. webscout/swiftcli/decorators/command.py +221 -0
  74. webscout/swiftcli/decorators/options.py +220 -0
  75. webscout/swiftcli/decorators/output.py +252 -0
  76. webscout/swiftcli/exceptions.py +21 -0
  77. webscout/swiftcli/plugins/__init__.py +9 -0
  78. webscout/swiftcli/plugins/base.py +135 -0
  79. webscout/swiftcli/plugins/manager.py +262 -0
  80. webscout/swiftcli/utils/__init__.py +59 -0
  81. webscout/swiftcli/utils/formatting.py +252 -0
  82. webscout/swiftcli/utils/parsing.py +267 -0
  83. webscout/version.py +1 -1
  84. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/METADATA +166 -45
  85. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/RECORD +89 -89
  86. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/WHEEL +1 -1
  87. webscout-8.2.6.dist-info/entry_points.txt +3 -0
  88. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/top_level.txt +0 -1
  89. inferno/__init__.py +0 -6
  90. inferno/__main__.py +0 -9
  91. inferno/cli.py +0 -6
  92. inferno/lol.py +0 -589
  93. webscout/LLM.py +0 -442
  94. webscout/Local/__init__.py +0 -12
  95. webscout/Local/__main__.py +0 -9
  96. webscout/Local/api.py +0 -576
  97. webscout/Local/cli.py +0 -516
  98. webscout/Local/config.py +0 -75
  99. webscout/Local/llm.py +0 -287
  100. webscout/Local/model_manager.py +0 -253
  101. webscout/Local/server.py +0 -721
  102. webscout/Local/utils.py +0 -93
  103. webscout/Provider/Chatify.py +0 -175
  104. webscout/Provider/PizzaGPT.py +0 -228
  105. webscout/Provider/askmyai.py +0 -158
  106. webscout/Provider/gaurish.py +0 -244
  107. webscout/Provider/promptrefine.py +0 -193
  108. webscout/Provider/tutorai.py +0 -270
  109. webscout-8.2.4.dist-info/entry_points.txt +0 -5
  110. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/licenses/LICENSE.md +0 -0
webscout/conversation.py CHANGED
@@ -2,59 +2,71 @@ import os
 import json
 import logging
 from typing import Optional, Dict, List, Any, TypedDict, Callable, TypeVar, Union
+from dataclasses import dataclass
+from datetime import datetime
 
 T = TypeVar('T')
 
+class ConversationError(Exception):
+    """Base exception for conversation-related errors."""
+    pass
+
+class ToolCallError(ConversationError):
+    """Raised when there's an error with tool calls."""
+    pass
+
+class MessageValidationError(ConversationError):
+    """Raised when message validation fails."""
+    pass
+
+@dataclass
+class Message:
+    """Represents a single message in the conversation."""
+    role: str
+    content: str
+    timestamp: datetime = datetime.now()
+    metadata: Dict[str, Any] = None
+
+    def __post_init__(self):
+        if self.metadata is None:
+            self.metadata = {}
 
 class FunctionCall(TypedDict):
     """Type for a function call."""
     name: str
     arguments: Dict[str, Any]
 
-
 class ToolDefinition(TypedDict):
     """Type for a tool definition."""
     type: str
     function: Dict[str, Any]
 
-
 class FunctionCallData(TypedDict, total=False):
     """Type for function call data"""
     tool_calls: List[FunctionCall]
     error: str
 
-
 class Fn:
-    """
-    Represents a function (tool) that the agent can call.
-    """
+    """Represents a function (tool) that the agent can call."""
     def __init__(self, name: str, description: str, parameters: Dict[str, str]) -> None:
         self.name: str = name
         self.description: str = description
         self.parameters: Dict[str, str] = parameters
 
-
 def tools(func: Callable[..., T]) -> Callable[..., T]:
-    """Decorator to mark a function as a tool and automatically convert it."""
+    """Decorator to mark a function as a tool."""
     func._is_tool = True # type: ignore
     return func
 
-
 class Conversation:
-    """Handles prompt generation based on history and maintains chat context.
-
-    This class is responsible for managing chat conversations, including:
-    - Maintaining chat history
-    - Loading/saving conversations from/to files
-    - Generating prompts based on context
-    - Managing token limits and history pruning
-    - Supporting tool calling functionality
+    """Modern conversation manager with enhanced features.
 
-    Examples:
-        >>> chat = Conversation(max_tokens=500)
-        >>> chat.add_message("user", "Hello!")
-        >>> chat.add_message("llm", "Hi there!")
-        >>> prompt = chat.gen_complete_prompt("What's up?")
+    Key Features:
+    - Robust message handling with metadata
+    - Enhanced tool calling support
+    - Efficient history management
+    - Improved error handling
+    - Memory optimization
     """
 
     intro = (
@@ -69,130 +81,136 @@ class Conversation:
         filepath: Optional[str] = None,
         update_file: bool = True,
         tools: Optional[List[Fn]] = None,
+        compression_threshold: int = 10000,
     ):
-        """Initialize a new Conversation manager.
-
-        Args:
-            status (bool): Flag to control history tracking. Defaults to True.
-            max_tokens (int): Maximum tokens for completion response. Defaults to 600.
-            filepath (str, optional): Path to save/load conversation history. Defaults to None.
-            update_file (bool): Whether to append new messages to file. Defaults to True.
-            tools (List[Fn], optional): List of tools available for the conversation. Defaults to None.
-
-        Examples:
-            >>> chat = Conversation(max_tokens=500)
-            >>> chat = Conversation(filepath="chat_history.txt")
-        """
+        """Initialize conversation manager with modern features."""
         self.status = status
         self.max_tokens_to_sample = max_tokens
-        self.chat_history = "" # Initialize as empty string
-        # Updated history formats
+        self.messages: List[Message] = []
         self.history_format = "\nUser: %(user)s\nAssistant: %(llm)s"
-        # Tool format: Assistant outputs the tool call, then Tool provides the result
         self.tool_history_format = "\nUser: %(user)s\nAssistant: <tool_call>%(tool_json)s</tool_call>\nTool: %(result)s"
         self.file = filepath
         self.update_file = update_file
         self.history_offset = 10250
         self.prompt_allowance = 10
         self.tools = tools or []
+        self.compression_threshold = compression_threshold
+        self.logger = self._setup_logger()
 
         if filepath:
             self.load_conversation(filepath, False)
 
-    def load_conversation(self, filepath: str, exists: bool = True) -> None:
-        """Load conversation history from a text file.
-
-        Args:
-            filepath (str): Path to the history file
-            exists (bool): Flag for file availability. Defaults to True.
+    def _setup_logger(self) -> logging.Logger:
+        """Set up enhanced logging."""
+        logger = logging.getLogger("conversation")
+        if not logger.handlers:
+            handler = logging.StreamHandler()
+            formatter = logging.Formatter(
+                '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+            )
+            handler.setFormatter(formatter)
+            logger.addHandler(handler)
+            logger.setLevel(logging.INFO)
+        return logger
 
-        Raises:
-            AssertionError: If filepath is not str or file doesn't exist
-        """
-        assert isinstance(
-            filepath, str
-        ), f"Filepath needs to be of str datatype not {type(filepath)}"
-        assert (
-            os.path.isfile(filepath) if exists else True
-        ), f"File '{filepath}' does not exist"
-
-        if not os.path.isfile(filepath):
-            with open(filepath, "w", encoding="utf-8") as fh:
-                fh.write(self.intro)
-        else:
-            with open(filepath, encoding="utf-8") as fh:
-                file_contents = fh.readlines()
-                if file_contents:
-                    self.intro = file_contents[0] # First line is intro
-                    self.chat_history = "\n".join(file_contents[1:])
-
-    def __trim_chat_history(self, chat_history: str, intro: str) -> str:
-        """Keep the chat history fresh by trimming it when it gets too long!
-
-        This method makes sure we don't exceed our token limits by:
-        - Calculating total length (intro + history)
-        - Trimming older messages if needed
-        - Keeping the convo smooth and within limits
-
-        Args:
-            chat_history (str): The current chat history to trim
-            intro (str): The conversation's intro/system prompt
-
-        Returns:
-            str: The trimmed chat history, ready to use!
-
-        Examples:
-            >>> chat = Conversation(max_tokens=500)
-            >>> trimmed = chat._Conversation__trim_chat_history("Hello! Hi!", "Intro")
-        """
-        len_of_intro = len(intro)
-        len_of_chat_history = len(chat_history)
-        total = self.max_tokens_to_sample + len_of_intro + len_of_chat_history
-
-        if total > self.history_offset:
-            truncate_at = (total - self.history_offset) + self.prompt_allowance
-            trimmed_chat_history = chat_history[truncate_at:]
-            return "... " + trimmed_chat_history
-        return chat_history
+    def load_conversation(self, filepath: str, exists: bool = True) -> None:
+        """Load conversation with improved error handling."""
+        try:
+            if not isinstance(filepath, str):
+                raise TypeError(f"Filepath must be str, not {type(filepath)}")
+
+            if exists and not os.path.isfile(filepath):
+                raise FileNotFoundError(f"File '{filepath}' does not exist")
+
+            if not os.path.isfile(filepath):
+                with open(filepath, "w", encoding="utf-8") as fh:
+                    fh.write(self.intro)
+            else:
+                with open(filepath, encoding="utf-8") as fh:
+                    file_contents = fh.readlines()
+                    if file_contents:
+                        self.intro = file_contents[0]
+                        self._process_history_from_file(file_contents[1:])
+        except Exception as e:
+            self.logger.error(f"Error loading conversation: {str(e)}")
+            raise ConversationError(f"Failed to load conversation: {str(e)}") from e
+
+    def _process_history_from_file(self, lines: List[str]) -> None:
+        """Process and structure conversation history from file."""
+        current_role = None
+        current_content = []
+
+        for line in lines:
+            line = line.strip()
+            if line.startswith(("User:", "Assistant:", "Tool:")):
+                if current_role and current_content:
+                    self.messages.append(Message(
+                        role=current_role,
+                        content="\n".join(current_content)
+                    ))
+                    current_content = []
+                current_role = line.split(":")[0].lower()
+                content = ":".join(line.split(":")[1:]).strip()
+                current_content.append(content)
+            elif line:
+                current_content.append(line)
+
+        if current_role and current_content:
+            self.messages.append(Message(
+                role=current_role,
+                content="\n".join(current_content)
+            ))
+
+    def _compress_history(self) -> None:
+        """Compress history when it exceeds threshold."""
+        if len(self.messages) > self.compression_threshold:
+            # Keep recent messages and summarize older ones
+            keep_recent = 100 # Adjust based on needs
+            self.messages = (
+                [self._summarize_messages(self.messages[:-keep_recent])] +
+                self.messages[-keep_recent:]
+            )
+
+    def _summarize_messages(self, messages: List[Message]) -> Message:
+        """Create a summary message from older messages."""
+        return Message(
+            role="system",
+            content="[History Summary] Previous conversation summarized for context",
+            metadata={"summarized_count": len(messages)}
+        )
 
     def gen_complete_prompt(self, prompt: str, intro: Optional[str] = None) -> str:
-        """Generate a complete prompt that's ready to go!
-
-        This method:
-        - Combines the intro, history, and new prompt
-        - Adds tools information if available
-        - Trims history if needed
-        - Keeps everything organized and flowing
-
-        Args:
-            prompt (str): Your message to add to the chat
-            intro (str, optional): Custom intro to use. Default: None (uses class intro)
-
-        Returns:
-            str: The complete conversation prompt, ready for the LLM!
-
-        Examples:
-            >>> chat = Conversation()
-            >>> prompt = chat.gen_complete_prompt("What's good?")
-        """
+        """Generate complete prompt with enhanced context management."""
         if not self.status:
             return prompt
 
-        intro = intro or self.intro or (
-            '''You are a helpful and versatile AI assistant. Your goal is to provide concise and informative responses directly to user queries. Use available tools in correct format to enhance responses or execute actions as needed.
-            ''')
+        intro = intro or self.intro
 
-        # Add tool information if tools are available
+        # Add tool information if available
         tools_description = self.get_tools_description()
         if tools_description:
             try:
-                from datetime import datetime
                 date_str = f"Current date: {datetime.now().strftime('%d %b %Y')}"
             except:
                 date_str = ""
 
-            intro = (f'''
-            {intro}
+            intro = self._generate_enhanced_intro(intro, tools_description, date_str)
+
+        # Generate history string with proper formatting
+        history = self._generate_history_string()
+
+        # Combine and trim if needed
+        complete_prompt = intro + self._trim_chat_history(
+            history + "\nUser: " + prompt + "\nAssistant:",
+            intro
+        )
+
+        return complete_prompt
+
+    def _generate_enhanced_intro(self, intro: str, tools_description: str, date_str: str) -> str:
+        """Generate enhanced introduction with tools and guidelines."""
+        return f'''
+{intro}
 
 {date_str}
 
@@ -201,379 +219,218 @@ class Conversation:
 Your goal is to assist the user effectively. Analyze each query and choose one of two response modes:
 
 **1. Tool Mode:**
-- **When:** If the query requires external data, calculations, or functions listed under AVAILABLE TOOLS (e.g., web search, current info).
-- **Action:** Output *ONLY* the complete JSON tool call, exactly matching the TOOL FORMAT below, enclosed in `<tool_call>` and `</tool_call>` tags.
-- **CRITICAL:** Absolutely NO text, whitespace, or characters before `<tool_call>` or after `</tool_call>`. The output *must* start with `<tool_call>` and end with `</tool_call>`.
-- **Example (Output is *only* this block):**
-```json
-<tool_call>
-{{
-    "name": "search",
-    "arguments": {{ "query": "latest population of Tokyo" }}
-}}
-</tool_call>
-```
+- **When:** If the query requires external data, calculations, or functions listed under AVAILABLE TOOLS.
+- **Action:** Output *ONLY* the complete JSON tool call within tags.
+- **Format:** Must start with `<tool_call>` and end with `</tool_call>`.
 
 **2. Conversational Mode:**
-- **When:** If the query can be answered using your internal knowledge, is creative, or conversational.
-- **Action:** Respond directly, clearly, and concisely.
-- **Example:** *User:* "Explain photosynthesis." *Assistant:* "Photosynthesis is how plants use sunlight, water, and carbon dioxide to create their food (glucose) and release oxygen."
-
-**ABSOLUTE PROHIBITIONS:**
-- **NEVER Explain Tool Use:** Don't say you're using a tool, which one, or why.
-- **NEVER Describe JSON/Tags:** Do not mention `tool_call`, JSON structure, or parameters.
-- **NEVER Apologize for Tools:** No need to say sorry for lacking direct info.
-- **NEVER Mix Text and Tool Calls:** Tool calls must be standalone.
-
-**Be concise and relevant in all responses.**
+- **When:** For queries answerable with internal knowledge.
+- **Action:** Respond directly and concisely.
 
 **AVAILABLE TOOLS:**
 {tools_description}
 
-**TOOL FORMAT (Use Exactly):**
+**TOOL FORMAT:**
 <tool_call>
 {{
     "name": "tool_name",
     "arguments": {{
         "param": "value"
-        /* Add other parameters as needed */
     }}
 }}
 </tool_call>
+'''
+
+    def _generate_history_string(self) -> str:
+        """Generate formatted history string from messages."""
+        history_parts = []
+        for msg in self.messages:
+            if msg.role == "system" and msg.metadata.get("summarized_count"):
+                history_parts.append(f"[Previous messages summarized: {msg.metadata['summarized_count']}]")
+            else:
+                role_display = msg.role.capitalize()
+                if "<tool_call>" in msg.content:
+                    history_parts.append(f"{role_display}: {msg.content}")
+                else:
+                    history_parts.append(f"{role_display}: {msg.content}")
+        return "\n".join(history_parts)
 
-**Summary Check:**
-1. Tool needed? -> Output *only* the JSON in tags.
-2. No tool needed? -> Respond directly and conversationally.
-3. Avoid *all* prohibited explanations/text.
-''')
-
-        incomplete_chat_history = self.chat_history + "\nUser: " + prompt + "\nAssistant:" # Ensure it ends correctly
-        complete_prompt = intro + self.__trim_chat_history(incomplete_chat_history, intro)
-        return complete_prompt
-
-    def update_chat_history(
-        self, prompt: str, response: str, force: bool = False
-    ) -> None:
-        """Keep the conversation flowing by updating the chat history!
-
-        This method:
-        - Adds new messages to the history
-        - Updates the file if needed
-        - Keeps everything organized
-
-        Args:
-            prompt (str): Your message to add
-            response (str): The LLM's response
-            force (bool): Force update even if history is off. Default: False
-
-        Examples:
-            >>> chat = Conversation()
-            >>> chat.update_chat_history("Hi!", "Hello there!")
-        """
-        if not self.status and not force:
-            return
-
-        # Use the updated history_format
-        new_history = self.history_format % {"user": prompt, "llm": response}
+    def _trim_chat_history(self, chat_history: str, intro: str) -> str:
+        """Trim chat history with improved token management."""
+        total_length = len(intro) + len(chat_history)
 
-        if self.file and self.update_file:
-            # Create file if it doesn't exist
-            if not os.path.exists(self.file):
-                with open(self.file, "w", encoding="utf-8") as fh:
-                    fh.write(self.intro + "\n")
-
-            # Append new history
-            with open(self.file, "a", encoding="utf-8") as fh:
-                fh.write(new_history)
-
-        self.chat_history += new_history
-        # logger.info(f"Chat history updated with prompt: {prompt}")
-
-    def update_chat_history_with_tool(
-        self, prompt: str, tool_call_json: str, tool_result: str, force: bool = False # Changed tool_name to tool_call_json
-    ) -> None:
-        """Update chat history with a tool call and its result.
-
-        This method:
-        - Adds tool call interaction to the history using the new format
-        - Updates the file if needed
-        - Maintains the conversation flow with tools
+        if total_length > self.history_offset:
+            truncate_at = (total_length - self.history_offset) + self.prompt_allowance
+            # Try to truncate at a message boundary
+            lines = chat_history[truncate_at:].split('\n')
+            for i, line in enumerate(lines):
+                if line.startswith(("User:", "Assistant:", "Tool:")):
+                    return "... " + "\n".join(lines[i:])
+            return "... " + chat_history[truncate_at:]
+        return chat_history
 
-        Args:
-            prompt (str): The user's message that triggered the tool call
-            tool_call_json (str): The JSON string representing the tool call made by the assistant
-            tool_result (str): Result returned by the tool
-            force (bool): Force update even if history is off. Default: False
-
-        Examples:
-            >>> chat = Conversation()
-            >>> tool_json = '{"name": "weather_tool", "arguments": {"location": "London"}}'
-            >>> chat.update_chat_history_with_tool("What's the weather?", tool_json, "It's sunny, 75°F")
-        """
-        if not self.status and not force:
-            return
-
-        # Use the updated tool_history_format
-        new_history = self.tool_history_format % {
-            "user": prompt,
-            "tool_json": tool_call_json, # Use the JSON string
-            "result": tool_result
-        }
-
-        if self.file and self.update_file:
-            # Create file if it doesn't exist
+    def add_message(self, role: str, content: str, metadata: Optional[Dict[str, Any]] = None) -> None:
+        """Add a message with enhanced validation and metadata support."""
+        try:
+            if not self.validate_message(role, content):
+                raise MessageValidationError("Invalid message role or content")
+
+            message = Message(role=role, content=content, metadata=metadata or {})
+            self.messages.append(message)
+
+            if self.file and self.update_file:
+                self._append_to_file(message)
+
+            self._compress_history()
+
+        except Exception as e:
+            self.logger.error(f"Error adding message: {str(e)}")
+            raise ConversationError(f"Failed to add message: {str(e)}") from e
+
+    def _append_to_file(self, message: Message) -> None:
+        """Append message to file with error handling."""
+        try:
             if not os.path.exists(self.file):
                 with open(self.file, "w", encoding="utf-8") as fh:
                     fh.write(self.intro + "\n")
 
-            # Append new history
             with open(self.file, "a", encoding="utf-8") as fh:
-                fh.write(new_history)
-
-        self.chat_history += new_history
-
-    def add_message(self, role: str, content: str) -> None:
-        """Add a new message to the chat - simple and clean!
-
-        This method:
-        - Validates the message role
-        - Adds the message to history
-        - Updates file if needed
-
-        Args:
-            role (str): Who's sending? ('user', 'llm', 'tool', or 'reasoning')
-            content (str): What's the message?
-
-        Examples:
-            >>> chat = Conversation()
-            >>> chat.add_message("user", "Hey there!")
-            >>> chat.add_message("llm", "Hi! How can I help?")
-        """
-        if not self.validate_message(role, content):
-            raise ValueError("Invalid message role or content")
-
-        # Updated role formats to match User/Assistant
-        role_formats = {
-            "user": "User",
-            "assistant": "Assistant", # Changed from 'llm'
-            "llm": "Assistant", # Keep llm for backward compatibility? Or remove? Let's keep for now.
-            "tool": "Tool",
-            "reasoning": "Reasoning" # Keep reasoning if used internally
-        }
-
-        if role in role_formats:
-            # Special handling for assistant's tool call output
-            if role == "assistant" and "<tool_call>" in content:
-                # History format already includes the tags, just add the content
-                self.chat_history += f"\n{role_formats[role]}: {content}"
-            elif role == "tool":
-                # Tool results follow the Assistant's tool call
-                self.chat_history += f"\n{role_formats[role]}: {content}"
-            else:
-                # Standard user/assistant message
-                self.chat_history += f"\n{role_formats[role]}: {content}"
-        else:
-            raise ValueError(f"Invalid role: {role}. Must be one of {list(role_formats.keys())}")
-
-        # ... (logging remains the same) ...
+                role_display = message.role.capitalize()
+                fh.write(f"\n{role_display}: {message.content}")
+
+        except Exception as e:
+            self.logger.error(f"Error writing to file: {str(e)}")
+            raise ConversationError(f"Failed to write to file: {str(e)}") from e
 
     def validate_message(self, role: str, content: str) -> bool:
-        """Validate the message role and content."""
-        # Updated valid roles
-        valid_roles = {'user', 'assistant', 'llm', 'tool', 'reasoning'} # Changed 'llm' to 'assistant', kept 'llm' maybe?
+        """Validate message with enhanced role checking."""
+        valid_roles = {'user', 'assistant', 'tool', 'system'}
         if role not in valid_roles:
-            logging.error(f"Invalid role: {role}")
+            self.logger.error(f"Invalid role: {role}")
             return False
-        if not content:
-            logging.error("Content cannot be empty.")
+        if not content or not isinstance(content, str):
+            self.logger.error("Invalid content")
             return False
         return True
 
-    def _parse_function_call(self, response: str) -> FunctionCallData:
-        """Parse a function call from the LLM's response.
+    def handle_tool_response(self, response: str) -> Dict[str, Any]:
+        """Process tool responses with enhanced error handling."""
+        try:
+            if "<tool_call>" in response:
+                function_call_data = self._parse_function_call(response)
+
+                if "error" in function_call_data:
+                    return {
+                        "is_tool_call": True,
+                        "success": False,
+                        "result": function_call_data["error"],
+                        "original_response": response
+                    }
 
-        Args:
-            response (str): The LLM's response containing a function call
+                result = self.execute_function(function_call_data)
+                self.add_message("tool", result)
 
-        Returns:
-            FunctionCallData: Parsed function call data or error
-        """
+                return {
+                    "is_tool_call": True,
+                    "success": True,
+                    "result": result,
+                    "tool_calls": function_call_data.get("tool_calls", []),
+                    "original_response": response
+                }
+
+            return {
+                "is_tool_call": False,
+                "result": response,
+                "original_response": response
+            }
+
+        except Exception as e:
+            self.logger.error(f"Error handling tool response: {str(e)}")
+            raise ToolCallError(f"Failed to handle tool response: {str(e)}") from e
+
+    def _parse_function_call(self, response: str) -> FunctionCallData:
+        """Parse function calls with improved JSON handling."""
         try:
-            # First try the standard format with square brackets: <tool_call>[...]</tool_call>
-            start_tag: str = "<tool_call>["
-            end_tag: str = "]</tool_call>"
-            start_idx: int = response.find(start_tag)
-            end_idx: int = response.rfind(end_tag)
-
-            # If not found, try the alternate format: <tool_call>\n{...}\n</tool_call>
-            if start_idx == -1 or end_idx == -1 or end_idx <= start_idx:
-                start_tag = "<tool_call>"
-                end_tag = "</tool_call>"
-                start_idx = response.find(start_tag)
-                end_idx = response.rfind(end_tag)
-
-                if start_idx == -1 or end_idx == -1 or end_idx <= start_idx:
-                    raise ValueError("No valid <tool_call> JSON structure found in the response.")
-
-                # Extract JSON content - for the format without brackets
-                json_str: str = response[start_idx + len(start_tag):end_idx].strip()
-
-                # Try to parse the JSON directly
-                try:
-                    parsed_response: Any = json.loads(json_str)
-                    if isinstance(parsed_response, dict):
-                        return {"tool_calls": [parsed_response]}
-                    else:
-                        raise ValueError("Invalid JSON structure in tool call.")
-                except json.JSONDecodeError:
-                    # If direct parsing failed, try to extract just the JSON object
-                    import re
-                    json_pattern = re.search(r'\{[\s\S]*\}', json_str)
-                    if json_pattern:
-                        parsed_response = json.loads(json_pattern.group(0))
-                        return {"tool_calls": [parsed_response]}
-                    raise
-            else:
-                # Extract JSON content - for the format with brackets
-                json_str: str = response[start_idx + len(start_tag):end_idx].strip()
-                parsed_response: Any = json.loads(json_str)
-
-                if isinstance(parsed_response, list):
-                    return {"tool_calls": parsed_response}
-                elif isinstance(parsed_response, dict):
-                    return {"tool_calls": [parsed_response]}
+            # Extract content between tool call tags
+            start_tag = "<tool_call>"
+            end_tag = "</tool_call>"
+            start_idx = response.find(start_tag)
+            end_idx = response.rfind(end_tag)
+
+            if start_idx == -1 or end_idx == -1:
+                raise ValueError("No valid tool call tags found")
+
+            json_str = response[start_idx + len(start_tag):end_idx].strip()
+
+            # Handle both single and multiple tool calls
+            try:
+                parsed = json.loads(json_str)
+                if isinstance(parsed, dict):
+                    return {"tool_calls": [parsed]}
+                elif isinstance(parsed, list):
+                    return {"tool_calls": parsed}
                 else:
-                    raise ValueError("<tool_call> should contain a list or a dictionary of tool calls.")
-
-        except (ValueError, json.JSONDecodeError) as e:
-            logging.error(f"Error parsing function call: %s", e)
+                    raise ValueError("Invalid tool call structure")
+            except json.JSONDecodeError:
+                # Try to extract valid JSON if embedded in other content
+                import re
+                json_pattern = re.search(r'\{[\s\S]*\}', json_str)
+                if json_pattern:
+                    parsed = json.loads(json_pattern.group(0))
+                    return {"tool_calls": [parsed]}
+                raise
+
+        except Exception as e:
+            self.logger.error(f"Error parsing function call: {str(e)}")
             return {"error": str(e)}
 
     def execute_function(self, function_call_data: FunctionCallData) -> str:
-        """Execute a function call and return the result.
-
-        Args:
-            function_call_data (FunctionCallData): The function call data
-
-        Returns:
-            str: Result of the function execution
-        """
-        tool_calls: Optional[List[FunctionCall]] = function_call_data.get("tool_calls")
-
-        if not tool_calls or not isinstance(tool_calls, list):
-            return "Invalid tool_calls format."
-
-        results: List[str] = []
-        for tool_call in tool_calls:
-            function_name: str = tool_call.get("name")
-            arguments: Dict[str, Any] = tool_call.get("arguments", {})
-
-            if not function_name or not isinstance(arguments, dict):
-                results.append(f"Invalid tool call: {tool_call}")
-                continue
-
-            # Here you would implement the actual execution logic for each tool
-            # For demonstration, we'll return a placeholder response
-            results.append(f"Executed {function_name} with arguments {arguments}")
-
-        return "; ".join(results)
-
-    def _convert_fns_to_tools(self, fns: Optional[List[Fn]]) -> List[ToolDefinition]:
-        """Convert functions to tool definitions for the LLM.
+        """Execute functions with enhanced error handling."""
+        try:
+            tool_calls = function_call_data.get("tool_calls", [])
+            if not tool_calls:
+                raise ValueError("No tool calls provided")
+
+            results = []
+            for tool_call in tool_calls:
+                name = tool_call.get("name")
+                arguments = tool_call.get("arguments", {})
+
+                if not name or not isinstance(arguments, dict):
+                    raise ValueError(f"Invalid tool call format: {tool_call}")
+
+                # Execute the tool (implement actual logic here)
+                results.append(f"Executed {name} with arguments {arguments}")
 
-        Args:
-            fns (Optional[List[Fn]]): List of function definitions
-
-        Returns:
-            List[ToolDefinition]: List of tool definitions
-        """
-        if not fns:
-            return []
-
-        tools: List[ToolDefinition] = []
-        for fn in fns:
-            tool: ToolDefinition = {
-                "type": "function",
-                "function": {
-                    "name": fn.name,
-                    "description": fn.description,
-                    "parameters": {
-                        "type": "object",
-                        "properties": {
-                            param_name: {
-                                "type": param_type,
-                                "description": f"The {param_name} parameter"
-                            } for param_name, param_type in fn.parameters.items()
-                        },
-                        "required": list(fn.parameters.keys())
-                    }
-                }
-            }
-            tools.append(tool)
-        return tools
+            return "; ".join(results)
+
+        except Exception as e:
+            self.logger.error(f"Error executing function: {str(e)}")
+            raise ToolCallError(f"Failed to execute function: {str(e)}") from e
 
     def get_tools_description(self) -> str:
-        """Get a formatted string of available tools for the intro prompt.
-
-        Returns:
-            str: Formatted tools description
-        """
+        """Get formatted tools description."""
         if not self.tools:
             return ""
 
-        tools_desc = []
-        for fn in self.tools:
-            params_desc = ", ".join([f"{name}: {typ}" for name, typ in fn.parameters.items()])
-            tools_desc.append(f"- {fn.name}: {fn.description} (Parameters: {params_desc})")
-
-        return "\n".join(tools_desc)
-
-    def handle_tool_response(self, response: str) -> Dict[str, Any]:
-        """Process a response that might contain a tool call.
-
-        This method:
-        - Checks if the response contains a tool call
-        - Parses and executes the tool call if present
-        - Returns the appropriate result
+        return "\n".join(
+            f"- {fn.name}: {fn.description} (Parameters: {', '.join(f'{name}: {typ}' for name, typ in fn.parameters.items())})"
+            for fn in self.tools
+        )
 
+    def update_chat_history(self, prompt: str, response: str) -> None:
+        """Update chat history with a new prompt-response pair.
+
         Args:
-            response (str): The LLM's response
-
-        Returns:
-            Dict[str, Any]: Result containing 'is_tool_call', 'result', and 'original_response'
+            prompt: The user's prompt/question
+            response: The assistant's response
+
+        This method adds both the user's prompt and the assistant's response
+        to the conversation history as separate messages.
         """
-        # Check if response contains a tool call
-        if "<tool_call>" in response:
-            function_call_data = self._parse_function_call(response)
-
-            if "error" in function_call_data:
-                return {
-                    "is_tool_call": True,
-                    "success": False,
-                    "result": function_call_data["error"],
-                    "original_response": response
-                }
-
-            # Execute the function call
-            result = self.execute_function(function_call_data)
-
-            # Add the result to chat history as a tool message
-            # The assistant's response (the tool call itself) should have been added before calling this
-            # Now we add the tool's result
-            self.add_message("tool", result) # This will now correctly add "\nTool: <result>"
-
-            return {
-                "is_tool_call": True,
-                "success": True,
-                "result": result, # This is the tool's execution result
-                "tool_calls": function_call_data.get("tool_calls", []),
-                "original_response": response # This is the LLM's response containing the <tool_call>
-            }
-
-        return {
-            "is_tool_call": False,
-            "result": response,
-            "original_response": response
-        }
-
+        # Add user's message
+        self.add_message("user", prompt)
+
+        # Add assistant's response
+        self.add_message("assistant", response)
 
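
To make the reworked API concrete, here is a minimal usage sketch written against the signatures visible in the diff above. It assumes the import path webscout.conversation; the get_weather tool and the LLM reply are fabricated for illustration, and execute_function in this release still returns a placeholder string rather than running real tool logic.

    from webscout.conversation import Conversation, Fn

    # Declare a tool using the Fn container from the diff.
    weather = Fn(
        name="get_weather",  # hypothetical tool, for illustration only
        description="Get current weather for a city",
        parameters={"city": "string"},
    )

    chat = Conversation(max_tokens=600, tools=[weather])

    # Build the full prompt: enhanced intro + tools description + message history.
    user_prompt = "What's the weather in Tokyo?"
    full_prompt = chat.gen_complete_prompt(user_prompt)

    # Simulate an LLM reply in the <tool_call> format the new intro demands.
    llm_reply = '<tool_call>{"name": "get_weather", "arguments": {"city": "Tokyo"}}</tool_call>'

    # Record the exchange, then parse and "execute" the tool call;
    # handle_tool_response also appends the tool result as a Message.
    chat.update_chat_history(user_prompt, llm_reply)
    outcome = chat.handle_tool_response(llm_reply)
    print(outcome["is_tool_call"], outcome["result"])
    # -> True Executed get_weather with arguments {'city': 'Tokyo'}

Two behavioral changes are worth noting in the sketch: update_chat_history no longer accepts the force flag it had in 8.2.4, and history now lives in Conversation.messages as a list of Message objects instead of the old chat_history string.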