agentic-blocks 0.1.15__tar.gz → 0.1.17__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {agentic_blocks-0.1.15/src/agentic_blocks.egg-info → agentic_blocks-0.1.17}/PKG-INFO +1 -1
- {agentic_blocks-0.1.15 → agentic_blocks-0.1.17}/pyproject.toml +1 -1
- agentic_blocks-0.1.17/src/agentic_blocks/agent.py +89 -0
- {agentic_blocks-0.1.15 → agentic_blocks-0.1.17}/src/agentic_blocks/utils/tools_utils.py +12 -0
- {agentic_blocks-0.1.15 → agentic_blocks-0.1.17/src/agentic_blocks.egg-info}/PKG-INFO +1 -1
- {agentic_blocks-0.1.15 → agentic_blocks-0.1.17}/src/agentic_blocks.egg-info/SOURCES.txt +1 -0
- {agentic_blocks-0.1.15 → agentic_blocks-0.1.17}/LICENSE +0 -0
- {agentic_blocks-0.1.15 → agentic_blocks-0.1.17}/README.md +0 -0
- {agentic_blocks-0.1.15 → agentic_blocks-0.1.17}/setup.cfg +0 -0
- {agentic_blocks-0.1.15 → agentic_blocks-0.1.17}/src/agentic_blocks/__init__.py +0 -0
- {agentic_blocks-0.1.15 → agentic_blocks-0.1.17}/src/agentic_blocks/llm.py +0 -0
- {agentic_blocks-0.1.15 → agentic_blocks-0.1.17}/src/agentic_blocks/mcp_client.py +0 -0
- {agentic_blocks-0.1.15 → agentic_blocks-0.1.17}/src/agentic_blocks/messages.py +0 -0
- {agentic_blocks-0.1.15 → agentic_blocks-0.1.17}/src/agentic_blocks.egg-info/dependency_links.txt +0 -0
- {agentic_blocks-0.1.15 → agentic_blocks-0.1.17}/src/agentic_blocks.egg-info/requires.txt +0 -0
- {agentic_blocks-0.1.15 → agentic_blocks-0.1.17}/src/agentic_blocks.egg-info/top_level.txt +0 -0
{agentic_blocks-0.1.15 → agentic_blocks-0.1.17}/pyproject.toml

@@ -14,7 +14,7 @@ agentic_blocks = []
 
 [project]
 name = "agentic-blocks"
-version = "0.1.15"
+version = "0.1.17"
 description = "Simple building blocks for agentic AI systems with MCP client and conversation management"
 readme = "README.md"
 requires-python = ">=3.11"
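The only change to pyproject.toml in this release is the version bump from 0.1.15 to 0.1.17. A minimal sketch, assuming the package has already been installed from a registry (e.g. `pip install agentic-blocks==0.1.17`), for confirming which release is active in an environment:

```python
# Sketch: confirm the installed agentic-blocks release matches the bumped version.
# Assumes the distribution is installed; importlib.metadata is in the standard library.
from importlib.metadata import version

print(version("agentic-blocks"))  # expected to print "0.1.17"
```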
agentic_blocks-0.1.17/src/agentic_blocks/agent.py

@@ -0,0 +1,89 @@
+from pocketflow import Node, Flow
+from agentic_blocks.utils.tools_utils import (
+    create_tool_registry,
+    execute_pending_tool_calls,
+)
+from agentic_blocks import call_llm, Messages
+
+
+class Agent:
+    def __init__(self, system_prompt: str, tools: list):
+        self.system_prompt = system_prompt
+        self.tools = tools
+        self.tool_registry = create_tool_registry(tools)
+
+        # Create nodes
+        self.llm_node = self._create_llm_node()
+        self.tool_node = self._create_tool_node()
+        self.answer_node = self._create_answer_node()
+
+        # Set up flow
+        self.llm_node - "tool_node" >> self.tool_node
+        self.tool_node - "llm_node" >> self.llm_node
+        self.llm_node - "answer_node" >> self.answer_node
+
+        self.flow = Flow(self.llm_node)
+
+    def _create_llm_node(self):
+        class LLMNode(Node):
+            def __init__(self, system_prompt, tools):
+                super().__init__()
+                self.system_prompt = system_prompt
+                self.tools = tools
+
+            def prep(self, shared):
+                return shared["messages"]
+
+            def exec(self, messages) -> Messages:
+                model = "qwen/qwen3-235b-a22b-2507"
+                response = call_llm(model=model, messages=messages, tools=self.tools)
+                messages.add_response_message(response)
+                return messages
+
+            def post(self, shared, prep_res, messages):
+                if messages.has_pending_tool_calls():
+                    return "tool_node"
+                else:
+                    return "answer_node"
+
+        return LLMNode(self.system_prompt, self.tools)
+
+    def _create_tool_node(self):
+        class ToolNode(Node):
+            def __init__(self, tool_registry):
+                super().__init__()
+                self.tool_registry = tool_registry
+
+            def prep(self, shared):
+                return shared["messages"]
+
+            def exec(self, messages) -> Messages:
+                tool_responses = execute_pending_tool_calls(
+                    messages, self.tool_registry
+                )
+                messages.add_tool_responses(tool_responses)
+                return messages
+
+            def post(self, shared, prep_res, messages):
+                return "llm_node"
+
+        return ToolNode(self.tool_registry)
+
+    def _create_answer_node(self):
+        class AnswerNode(Node):
+            def prep(self, shared):
+                messages = shared["messages"]
+                shared["answer"] = messages.get_messages()[-1]["content"]
+                return messages
+
+        return AnswerNode()
+
+    def invoke(self, user_prompt: str) -> str:
+        messages = Messages(user_prompt=user_prompt)
+        if self.system_prompt:
+            messages.add_system_message(self.system_prompt)
+
+        shared = {"messages": messages}
+        self.flow.run(shared)
+
+        return shared["answer"]
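The new agent.py wires a PocketFlow graph around the existing call_llm and Messages building blocks: the LLM node calls the model with the registered tools, routes to the tool node while pending tool calls remain, and falls through to the answer node once the model replies without tool calls. A minimal usage sketch follows; the example tool, the exact tool format expected by create_tool_registry, and the environment configuration that call_llm needs are assumptions, not taken from this diff.

```python
# Hypothetical usage sketch for the new Agent class. The Agent(system_prompt, tools)
# constructor and invoke(user_prompt) -> str come from the diff; the tool definition
# is assumed (tools_utils suggests LangChain StructuredTool instances are expected).
from agentic_blocks.agent import Agent
from langchain_core.tools import tool  # assumption: tools are LangChain tools


@tool
def add_numbers(a: int, b: int) -> int:
    """Add two integers and return the sum."""
    return a + b


agent = Agent(
    system_prompt="You are a helpful assistant that can use tools.",
    tools=[add_numbers],
)

# invoke() builds a Messages object, runs the LLM/tool loop, and returns the
# content of the final assistant message.
answer = agent.invoke("What is 17 + 25?")
print(answer)
```

Note that the model identifier is hard-coded inside LLMNode.exec, so running a sketch like this also requires whatever provider credentials call_llm expects for that model.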
{agentic_blocks-0.1.15 → agentic_blocks-0.1.17}/src/agentic_blocks/utils/tools_utils.py

@@ -2,6 +2,7 @@
 Utilities for working with tools across different formats.
 """
 
+import json
 from typing import Dict, Any, List
 
 
@@ -206,3 +207,14 @@ def execute_and_add_tool_responses(
     messages.add_tool_responses(results)
 
     return results
+
+
+def print_tool(tool) -> None:
+    """
+    Print a single LangChain tool in OpenAI format in a readable JSON structure.
+
+    Args:
+        tool: A langchain_core.tools.structured.StructuredTool instance
+    """
+    openai_tool = langchain_tool_to_openai_format(tool)
+    print(json.dumps(openai_tool, indent=2))
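The new print_tool helper converts a LangChain tool to the OpenAI tool schema via the module's existing langchain_tool_to_openai_format and pretty-prints it, which is useful for checking what the model will actually see. A small sketch, assuming a LangChain tool created with the @tool decorator (the example tool itself is hypothetical):

```python
# Sketch: inspect the OpenAI-format schema of a LangChain tool.
# print_tool comes from the diff above; get_weather is an invented example.
from langchain_core.tools import tool

from agentic_blocks.utils.tools_utils import print_tool


@tool
def get_weather(city: str) -> str:
    """Return a short weather summary for the given city."""
    return f"It is sunny in {city}."


# Prints the tool as indented JSON in OpenAI function-calling format,
# e.g. {"type": "function", "function": {"name": "get_weather", ...}}.
print_tool(get_weather)
```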