agentic-blocks 0.1.9__tar.gz → 0.1.11__tar.gz

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: agentic-blocks
- Version: 0.1.9
+ Version: 0.1.11
  Summary: Simple building blocks for agentic AI systems with MCP client and conversation management
  Author-email: Magnus Bjelkenhed <bjelkenhed@gmail.com>
  License: MIT
@@ -14,7 +14,7 @@ agentic_blocks = []

  [project]
  name = "agentic-blocks"
- version = "0.1.9"
+ version = "0.1.11"
  description = "Simple building blocks for agentic AI systems with MCP client and conversation management"
  readme = "README.md"
  requires-python = ">=3.11"
@@ -9,6 +9,7 @@ from dotenv import load_dotenv
  from openai import OpenAI

  from agentic_blocks.messages import Messages
+ from agentic_blocks.utils.tools_utils import langchain_tools_to_openai_format


  class LLMError(Exception):
@@ -19,7 +20,7 @@ class LLMError(Exception):

  def call_llm(
      messages: Union[Messages, List[Dict[str, Any]]],
-     tools: Optional[List[Dict[str, Any]]] = None,
+     tools: Optional[Union[List[Dict[str, Any]], List]] = None,
      api_key: Optional[str] = None,
      model: str = "gpt-4o-mini",
      base_url: Optional[str] = None,
@@ -30,7 +31,7 @@ def call_llm(

      Args:
          messages: Either a Messages instance or a list of message dicts
-         tools: Optional list of tools in OpenAI function calling format
+         tools: Optional list of tools in OpenAI function calling format or LangChain StructuredTools
          api_key: OpenAI API key (if not provided, loads from .env OPENAI_API_KEY)
          model: Model name to use for completion
          base_url: Base URL for the API (useful for VLLM or other OpenAI-compatible servers)
@@ -79,6 +80,15 @@ def call_llm(
      if not conversation_messages:
          raise LLMError("No messages provided for completion.")

+     # Handle tools parameter - convert LangChain tools if needed
+     openai_tools = None
+     if tools:
+         # Check if it's a list of LangChain StructuredTools
+         if tools and hasattr(tools[0], 'args_schema'):
+             openai_tools = langchain_tools_to_openai_format(tools)
+         else:
+             openai_tools = tools
+
      try:
          # Prepare completion parameters
          completion_params = {
@@ -87,8 +97,8 @@ def call_llm(
          **kwargs,
      }

-     if tools:
-         completion_params["tools"] = tools
+     if openai_tools:
+         completion_params["tools"] = openai_tools
          completion_params["tool_choice"] = "auto"

      # Make completion request
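With this change, call_llm accepts LangChain StructuredTool instances directly and converts them to the OpenAI schema before the request goes out. A minimal usage sketch, assuming langchain_core is installed; the agentic_blocks.llm import path and the get_weather tool are illustrative, not taken from the package:

from langchain_core.tools import tool

# Import path assumed for illustration; call_llm is the function patched above.
from agentic_blocks.llm import call_llm


@tool
def get_weather(city: str) -> str:
    """Return a short weather report for a city."""
    return f"It is sunny in {city}."


# call_llm accepts a plain list of message dicts as well as a Messages instance.
messages = [{"role": "user", "content": "What is the weather in Stockholm?"}]

# StructuredTools expose `args_schema`, so the new branch converts them with
# langchain_tools_to_openai_format before building completion_params.
# Requires OPENAI_API_KEY in the environment (or an explicit api_key argument).
response = call_llm(messages, tools=[get_weather])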
@@ -0,0 +1,176 @@
+ """
+ Utilities for working with tools across different formats.
+ """
+
+ from typing import Dict, Any, List
+
+
+ def langchain_tool_to_openai_format(tool) -> Dict[str, Any]:
+     """
+     Convert a LangChain StructuredTool to OpenAI function calling format.
+
+     Args:
+         tool: A langchain_core.tools.structured.StructuredTool instance
+
+     Returns:
+         Dictionary in OpenAI function calling format, compatible with
+         MCPClient.list_tools() output and call_llm() tools parameter
+     """
+     schema = tool.args_schema.model_json_schema()
+
+     return {
+         "type": "function",
+         "function": {
+             "name": schema.get("title", tool.name),
+             "description": schema.get("description", ""),
+             "parameters": {
+                 "type": "object",
+                 "properties": schema.get("properties", {}),
+                 "required": schema.get("required", [])
+             }
+         }
+     }
+
+
+ def langchain_tools_to_openai_format(tools: List) -> List[Dict[str, Any]]:
+     """
+     Convert a list of LangChain StructuredTools to OpenAI function calling format.
+
+     Args:
+         tools: List of langchain_core.tools.structured.StructuredTool instances
+
+     Returns:
+         List of dictionaries in OpenAI function calling format, compatible with
+         MCPClient.list_tools() output and call_llm() tools parameter
+     """
+     return [langchain_tool_to_openai_format(tool) for tool in tools]
+
+
+ def create_tool_registry(tools: List) -> Dict[str, Any]:
+     """
+     Create a registry mapping tool names to LangChain tool instances.
+
+     Args:
+         tools: List of langchain_core.tools.structured.StructuredTool instances
+
+     Returns:
+         Dictionary mapping tool names to tool instances
+     """
+     return {tool.name: tool for tool in tools}
+
+
+ def execute_tool_call(tool_call: Dict[str, Any], tool_registry: Dict[str, Any]) -> Dict[str, Any]:
+     """
+     Execute a single tool call using LangChain tool registry.
+
+     Args:
+         tool_call: Dictionary with 'tool_name', 'arguments', and 'tool_call_id' keys
+         tool_registry: Registry mapping tool names to tool instances
+
+     Returns:
+         Dictionary with 'tool_call_id', 'result', and 'is_error' keys
+     """
+     tool_name = tool_call.get('tool_name')
+     arguments = tool_call.get('arguments', {})
+     tool_call_id = tool_call.get('tool_call_id')
+
+     try:
+         if tool_name not in tool_registry:
+             raise ValueError(f"Tool '{tool_name}' not found in registry")
+
+         tool = tool_registry[tool_name]
+         result = tool.invoke(arguments)
+
+         return {
+             'tool_call_id': tool_call_id,
+             'result': result,
+             'is_error': False
+         }
+     except Exception as e:
+         return {
+             'tool_call_id': tool_call_id,
+             'result': f"Error executing tool '{tool_name}': {str(e)}",
+             'is_error': True
+         }
+
+
+ def execute_pending_tool_calls(messages, tool_registry: Dict[str, Any]) -> List[Dict[str, Any]]:
+     """
+     Execute all pending tool calls from a Messages instance and add responses back.
+
+     Args:
+         messages: Messages instance with pending tool calls
+         tool_registry: Registry mapping tool names to tool instances
+
+     Returns:
+         List of execution results compatible with Messages.add_tool_responses format
+     """
+     pending_tool_calls = messages.get_pending_tool_calls()
+     results = []
+
+     for tool_call in pending_tool_calls:
+         result = execute_tool_call(tool_call, tool_registry)
+
+         # Convert to format expected by Messages.add_tool_responses
+         if result['is_error']:
+             tool_response = {
+                 'tool_call_id': result['tool_call_id'],
+                 'is_error': True,
+                 'error': result['result']
+             }
+         else:
+             tool_response = {
+                 'tool_call_id': result['tool_call_id'],
+                 'is_error': False,
+                 'tool_response': result['result']
+             }
+
+         results.append(tool_response)
+
+         # Add tool response back to messages using individual method
+         if result['is_error']:
+             messages.add_tool_response(result['tool_call_id'], result['result'])
+         else:
+             messages.add_tool_response(result['tool_call_id'], str(result['result']))
+
+     return results
+
+
+ def execute_and_add_tool_responses(messages, tool_registry: Dict[str, Any]) -> List[Dict[str, Any]]:
+     """
+     Execute all pending tool calls and add them using Messages.add_tool_responses batch method.
+
+     Args:
+         messages: Messages instance with pending tool calls
+         tool_registry: Registry mapping tool names to tool instances
+
+     Returns:
+         List of execution results compatible with Messages.add_tool_responses format
+     """
+     pending_tool_calls = messages.get_pending_tool_calls()
+     results = []
+
+     for tool_call in pending_tool_calls:
+         result = execute_tool_call(tool_call, tool_registry)
+
+         # Convert to format expected by Messages.add_tool_responses
+         if result['is_error']:
+             tool_response = {
+                 'tool_call_id': result['tool_call_id'],
+                 'is_error': True,
+                 'error': result['result']
+             }
+         else:
+             tool_response = {
+                 'tool_call_id': result['tool_call_id'],
+                 'is_error': False,
+                 'tool_response': result['result']
+             }
+
+         results.append(tool_response)
+
+     # Add all responses at once using the batch method
+     if results:
+         messages.add_tool_responses(results)
+
+     return results
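The new module covers the round trip from LangChain tools to executed tool responses: conversion to the OpenAI function-calling schema, a name-to-tool registry, and dispatch of individual tool calls. A rough usage sketch, assuming langchain_core is installed; the multiply tool and the hand-built call dict are illustrative:

from langchain_core.tools import tool

from agentic_blocks.utils.tools_utils import (
    create_tool_registry,
    execute_tool_call,
    langchain_tools_to_openai_format,
)


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b


tools = [multiply]

# OpenAI function-calling schema, e.g. for passing to call_llm(tools=...).
openai_tools = langchain_tools_to_openai_format(tools)

# Map tool names to tool instances so tool calls can be dispatched by name.
registry = create_tool_registry(tools)

# Execute one tool call of the shape execute_tool_call expects.
call = {"tool_name": "multiply", "arguments": {"a": 6, "b": 7}, "tool_call_id": "call_1"}
print(execute_tool_call(call, registry))
# {'tool_call_id': 'call_1', 'result': 42, 'is_error': False}

execute_pending_tool_calls and execute_and_add_tool_responses wrap the same dispatch for all pending tool calls on a Messages instance and append the tool responses back to the conversation.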
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: agentic-blocks
- Version: 0.1.9
+ Version: 0.1.11
  Summary: Simple building blocks for agentic AI systems with MCP client and conversation management
  Author-email: Magnus Bjelkenhed <bjelkenhed@gmail.com>
  License: MIT
@@ -9,4 +9,5 @@ src/agentic_blocks.egg-info/PKG-INFO
  src/agentic_blocks.egg-info/SOURCES.txt
  src/agentic_blocks.egg-info/dependency_links.txt
  src/agentic_blocks.egg-info/requires.txt
- src/agentic_blocks.egg-info/top_level.txt
+ src/agentic_blocks.egg-info/top_level.txt
+ src/agentic_blocks/utils/tools_utils.py