agentic-blocks 0.1.8__tar.gz → 0.1.10__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: agentic-blocks
- Version: 0.1.8
+ Version: 0.1.10
  Summary: Simple building blocks for agentic AI systems with MCP client and conversation management
  Author-email: Magnus Bjelkenhed <bjelkenhed@gmail.com>
  License: MIT
@@ -23,6 +23,7 @@ Requires-Dist: mcp
  Requires-Dist: requests
  Requires-Dist: python-dotenv
  Requires-Dist: openai
+ Requires-Dist: langchain-core
  Provides-Extra: test
  Requires-Dist: pytest; extra == "test"
  Provides-Extra: dev
@@ -14,7 +14,7 @@ agentic_blocks = []

  [project]
  name = "agentic-blocks"
- version = "0.1.8"
+ version = "0.1.10"
  description = "Simple building blocks for agentic AI systems with MCP client and conversation management"
  readme = "README.md"
  requires-python = ">=3.11"
@@ -39,6 +39,7 @@ dependencies = [
      "requests",
      "python-dotenv",
      "openai",
+     "langchain-core",
  ]

  [project.urls]
@@ -9,6 +9,7 @@ from dotenv import load_dotenv
  from openai import OpenAI

  from agentic_blocks.messages import Messages
+ from agentic_blocks.utils.tools_utils import langchain_tools_to_openai_format


  class LLMError(Exception):
@@ -19,7 +20,7 @@ class LLMError(Exception):

  def call_llm(
      messages: Union[Messages, List[Dict[str, Any]]],
-     tools: Optional[List[Dict[str, Any]]] = None,
+     tools: Optional[Union[List[Dict[str, Any]], List]] = None,
      api_key: Optional[str] = None,
      model: str = "gpt-4o-mini",
      base_url: Optional[str] = None,
@@ -30,7 +31,7 @@ def call_llm(

      Args:
          messages: Either a Messages instance or a list of message dicts
-         tools: Optional list of tools in OpenAI function calling format
+         tools: Optional list of tools in OpenAI function calling format or LangChain StructuredTools
          api_key: OpenAI API key (if not provided, loads from .env OPENAI_API_KEY)
          model: Model name to use for completion
          base_url: Base URL for the API (useful for VLLM or other OpenAI-compatible servers)
@@ -79,6 +80,15 @@ def call_llm(
      if not conversation_messages:
          raise LLMError("No messages provided for completion.")

+     # Handle tools parameter - convert LangChain tools if needed
+     openai_tools = None
+     if tools:
+         # Check if it's a list of LangChain StructuredTools
+         if tools and hasattr(tools[0], 'args_schema'):
+             openai_tools = langchain_tools_to_openai_format(tools)
+         else:
+             openai_tools = tools
+
      try:
          # Prepare completion parameters
          completion_params = {
@@ -87,8 +97,8 @@
              **kwargs,
          }

-         if tools:
-             completion_params["tools"] = tools
+         if openai_tools:
+             completion_params["tools"] = openai_tools
              completion_params["tool_choice"] = "auto"

          # Make completion request
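Taken together, the call_llm() hunks above mean the tools argument now accepts either OpenAI-format tool dicts or LangChain StructuredTools; the latter are detected via their args_schema attribute and converted with langchain_tools_to_openai_format() before the request is sent. A minimal usage sketch under stated assumptions: the import path for call_llm and the get_weather tool are made up for illustration, and an OPENAI_API_KEY is assumed to be loadable from .env.

from langchain_core.tools import tool

from agentic_blocks.llm import call_llm  # assumed module path, not confirmed by this diff

@tool
def get_weather(city: str) -> str:
    """Return a short weather description for a city."""
    return f"Sunny in {city}"

messages = [{"role": "user", "content": "What is the weather in Stockholm?"}]

# Pre-existing style: OpenAI-format tool dicts pass through unchanged.
openai_tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Return a short weather description for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]
response = call_llm(messages, tools=openai_tools)

# New in this diff: LangChain StructuredTools are converted automatically.
response = call_llm(messages, tools=[get_weather])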
@@ -205,6 +205,51 @@ class Messages:

          return False

+     def get_pending_tool_calls(self) -> List[Dict[str, Any]]:
+         """
+         Get pending tool calls that need execution, formatted for MCPClient.call_tool().
+
+         Returns:
+             List of dictionaries with 'tool_name', 'arguments', and 'tool_call_id' keys
+         """
+         pending_calls = []
+
+         if not self.messages:
+             return pending_calls
+
+         last_message = self.messages[-1]
+
+         # Check if the last message is an assistant message with tool calls
+         if last_message.get("role") == "assistant" and "tool_calls" in last_message:
+             # Get tool call IDs that have responses
+             responded_tool_call_ids = set()
+             for msg in reversed(self.messages):
+                 if msg.get("role") == "tool" and msg.get("tool_call_id"):
+                     responded_tool_call_ids.add(msg.get("tool_call_id"))
+
+             # Find tool calls that don't have responses
+             for tool_call in last_message["tool_calls"]:
+                 tool_call_id = tool_call.get("id")
+                 if tool_call_id not in responded_tool_call_ids:
+                     function_info = tool_call.get("function", {})
+                     tool_name = function_info.get("name")
+                     arguments_str = function_info.get("arguments", "{}")
+
+                     # Parse arguments JSON string to dict
+                     import json
+                     try:
+                         arguments = json.loads(arguments_str)
+                     except json.JSONDecodeError:
+                         arguments = {}
+
+                     pending_calls.append({
+                         "tool_name": tool_name,
+                         "arguments": arguments,
+                         "tool_call_id": tool_call_id
+                     })
+
+         return pending_calls
+
      def __str__(self) -> str:
          """Return messages in a simple, readable format."""
          if not self.messages:
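Since the diff format makes the data flow a little hard to follow, here is a shape-only sketch (values are made up; only the dict structure comes from the method above): given a trailing assistant message with an unanswered tool call, get_pending_tool_calls() parses the arguments JSON string and returns entries ready for MCPClient.call_tool().

# Hypothetical last entry in Messages.messages:
assistant_message = {
    "role": "assistant",
    "tool_calls": [
        {
            "id": "call_abc123",
            "function": {
                "name": "get_weather",
                "arguments": '{"city": "Stockholm"}',
            },
        }
    ],
}

# With no {"role": "tool", "tool_call_id": "call_abc123"} reply in the
# conversation yet, get_pending_tool_calls() would return:
pending = [
    {
        "tool_name": "get_weather",
        "arguments": {"city": "Stockholm"},
        "tool_call_id": "call_abc123",
    }
]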
@@ -0,0 +1,46 @@
+ """
+ Utilities for working with tools across different formats.
+ """
+
+ from typing import Dict, Any, List
+
+
+ def langchain_tool_to_openai_format(tool) -> Dict[str, Any]:
+     """
+     Convert a LangChain StructuredTool to OpenAI function calling format.
+
+     Args:
+         tool: A langchain_core.tools.structured.StructuredTool instance
+
+     Returns:
+         Dictionary in OpenAI function calling format, compatible with
+         MCPClient.list_tools() output and call_llm() tools parameter
+     """
+     schema = tool.args_schema.model_json_schema()
+
+     return {
+         "type": "function",
+         "function": {
+             "name": schema.get("title", tool.name),
+             "description": schema.get("description", ""),
+             "parameters": {
+                 "type": "object",
+                 "properties": schema.get("properties", {}),
+                 "required": schema.get("required", [])
+             }
+         }
+     }
+
+
+ def langchain_tools_to_openai_format(tools: List) -> List[Dict[str, Any]]:
+     """
+     Convert a list of LangChain StructuredTools to OpenAI function calling format.
+
+     Args:
+         tools: List of langchain_core.tools.structured.StructuredTool instances
+
+     Returns:
+         List of dictionaries in OpenAI function calling format, compatible with
+         MCPClient.list_tools() output and call_llm() tools parameter
+     """
+     return [langchain_tool_to_openai_format(tool) for tool in tools]
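A hedged conversion example for the new module, assuming a @tool-decorated function from langchain_core; the exact "name" and "description" values depend on the JSON schema langchain-core generates for the tool's args_schema, so only the overall shape is asserted here.

from langchain_core.tools import tool

from agentic_blocks.utils.tools_utils import langchain_tool_to_openai_format

@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

spec = langchain_tool_to_openai_format(add)
# spec is an OpenAI function-calling dict shaped like:
# {"type": "function",
#  "function": {"name": ..., "description": ...,
#               "parameters": {"type": "object",
#                              "properties": {"a": {...}, "b": {...}},
#                              "required": ["a", "b"]}}}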
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: agentic-blocks
- Version: 0.1.8
+ Version: 0.1.10
  Summary: Simple building blocks for agentic AI systems with MCP client and conversation management
  Author-email: Magnus Bjelkenhed <bjelkenhed@gmail.com>
  License: MIT
@@ -23,6 +23,7 @@ Requires-Dist: mcp
  Requires-Dist: requests
  Requires-Dist: python-dotenv
  Requires-Dist: openai
+ Requires-Dist: langchain-core
  Provides-Extra: test
  Requires-Dist: pytest; extra == "test"
  Provides-Extra: dev
@@ -9,4 +9,5 @@ src/agentic_blocks.egg-info/PKG-INFO
  src/agentic_blocks.egg-info/SOURCES.txt
  src/agentic_blocks.egg-info/dependency_links.txt
  src/agentic_blocks.egg-info/requires.txt
- src/agentic_blocks.egg-info/top_level.txt
+ src/agentic_blocks.egg-info/top_level.txt
+ src/agentic_blocks/utils/tools_utils.py
@@ -2,6 +2,7 @@ mcp
  requests
  python-dotenv
  openai
+ langchain-core

  [dev]
  pytest