agentic-blocks 0.1.16__tar.gz → 0.1.18__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: agentic-blocks
3
- Version: 0.1.16
3
+ Version: 0.1.18
4
4
  Summary: Simple building blocks for agentic AI systems with MCP client and conversation management
5
5
  Author-email: Magnus Bjelkenhed <bjelkenhed@gmail.com>
6
6
  License: MIT
@@ -14,7 +14,7 @@ agentic_blocks = []
14
14
 
15
15
  [project]
16
16
  name = "agentic-blocks"
17
- version = "0.1.16"
17
+ version = "0.1.18"
18
18
  description = "Simple building blocks for agentic AI systems with MCP client and conversation management"
19
19
  readme = "README.md"
20
20
  requires-python = ">=3.11"
from pocketflow import Node, Flow
from agentic_blocks.utils.tools_utils import (
    create_tool_registry,
    execute_pending_tool_calls,
)
from agentic_blocks import call_llm, Messages


class Agent:
    """Tool-calling agent built on a pocketflow node graph.

    The graph loops LLM -> tool executor -> LLM for as long as the model
    keeps requesting tool calls, then terminates at an answer node that
    surfaces the last message content as the final reply.
    """

    def __init__(self, system_prompt: str, tools: list):
        self.system_prompt = system_prompt
        self.tools = tools
        self.tool_registry = create_tool_registry(tools)

        # Instantiate the three graph nodes.
        self.llm_node = self._create_llm_node()
        self.tool_node = self._create_tool_node()
        self.answer_node = self._create_answer_node()

        # Wire transitions: the LLM node either hands off to the tool
        # executor (which always loops back) or ends at the answer node.
        self.llm_node - "tool_node" >> self.tool_node
        self.llm_node - "answer_node" >> self.answer_node
        self.tool_node - "llm_node" >> self.llm_node

        self.flow = Flow(self.llm_node)

    def _create_llm_node(self):
        """Build the node that queries the LLM with the current history."""

        class LLMNode(Node):
            def __init__(self, system_prompt, tools):
                super().__init__()
                self.system_prompt = system_prompt
                self.tools = tools

            def prep(self, shared):
                # Hand the shared conversation object to exec().
                return shared["messages"]

            def exec(self, messages) -> Messages:
                # Ask the model, then record its reply in the history.
                reply = call_llm(messages=messages, tools=self.tools)
                messages.add_response_message(reply)
                return messages

            def post(self, shared, prep_res, messages):
                # Route to the tool executor while calls remain pending;
                # otherwise finish at the answer node.
                if messages.has_pending_tool_calls():
                    return "tool_node"
                return "answer_node"

        return LLMNode(self.system_prompt, self.tools)

    def _create_tool_node(self):
        """Build the node that executes the model's pending tool calls."""

        class ToolNode(Node):
            def __init__(self, tool_registry):
                super().__init__()
                self.tool_registry = tool_registry

            def prep(self, shared):
                return shared["messages"]

            def exec(self, messages) -> Messages:
                # Run every pending call against the registry and append
                # the tool results to the conversation.
                results = execute_pending_tool_calls(messages, self.tool_registry)
                messages.add_tool_responses(results)
                return messages

            def post(self, shared, prep_res, messages):
                # Always return control to the LLM for the next turn.
                return "llm_node"

        return ToolNode(self.tool_registry)

    def _create_answer_node(self):
        """Build the terminal node that exposes the final reply."""

        class AnswerNode(Node):
            def prep(self, shared):
                history = shared["messages"]
                # Surface the most recent message content as the answer.
                shared["answer"] = history.get_messages()[-1]["content"]
                return history

        return AnswerNode()

    def invoke(self, user_prompt: str) -> str:
        """Run the agent on *user_prompt* and return the final answer text."""
        messages = Messages(user_prompt=user_prompt)
        if self.system_prompt:
            messages.add_system_message(self.system_prompt)

        shared = {"messages": messages}
        self.flow.run(shared)
        return shared["answer"]
@@ -22,7 +22,7 @@ def call_llm(
22
22
  messages: Union[Messages, List[Dict[str, Any]]],
23
23
  tools: Optional[Union[List[Dict[str, Any]], List]] = None,
24
24
  api_key: Optional[str] = None,
25
- model: str = "gpt-4o-mini",
25
+ model: Optional[str] = None,
26
26
  base_url: Optional[str] = None,
27
27
  **kwargs,
28
28
  ) -> Any:
@@ -52,6 +52,11 @@ def call_llm(
52
52
  if not api_key:
53
53
  api_key = os.getenv("OPENROUTER_API_KEY")
54
54
 
55
+ if not base_url:
56
+ base_url = os.getenv("BASE_URL")
57
+ if not model:
58
+ model = os.getenv("MODEL_ID")
59
+
55
60
  if not api_key and not base_url:
56
61
  raise LLMError(
57
62
  "API key not found. Set OPENROUTER_API_KEY or OPENAI_API_KEY environment variable or pass api_key parameter."
@@ -64,12 +69,7 @@ def call_llm(
64
69
  api_key = "EMPTY"
65
70
 
66
71
  # Initialize OpenAI client
67
- client_kwargs = {}
68
- if api_key:
69
- client_kwargs["api_key"] = api_key
70
- if base_url:
71
- client_kwargs["base_url"] = base_url
72
- client = OpenAI(**client_kwargs)
72
+ client = OpenAI(api_key=api_key, base_url=base_url)
73
73
 
74
74
  # Handle different message input types
75
75
  if isinstance(messages, Messages):
@@ -84,7 +84,7 @@ def call_llm(
84
84
  openai_tools = None
85
85
  if tools:
86
86
  # Check if it's a list of LangChain StructuredTools
87
- if tools and hasattr(tools[0], 'args_schema'):
87
+ if tools and hasattr(tools[0], "args_schema"):
88
88
  openai_tools = langchain_tools_to_openai_format(tools)
89
89
  else:
90
90
  openai_tools = tools
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: agentic-blocks
3
- Version: 0.1.16
3
+ Version: 0.1.18
4
4
  Summary: Simple building blocks for agentic AI systems with MCP client and conversation management
5
5
  Author-email: Magnus Bjelkenhed <bjelkenhed@gmail.com>
6
6
  License: MIT
@@ -2,6 +2,7 @@ LICENSE
2
2
  README.md
3
3
  pyproject.toml
4
4
  src/agentic_blocks/__init__.py
5
+ src/agentic_blocks/agent.py
5
6
  src/agentic_blocks/llm.py
6
7
  src/agentic_blocks/mcp_client.py
7
8
  src/agentic_blocks/messages.py
File without changes