agentic-blocks 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agentic_blocks/llm.py CHANGED
@@ -24,7 +24,7 @@ def call_llm(
     model: str = "gpt-4o-mini",
     base_url: Optional[str] = None,
     **kwargs,
-) -> str:
+) -> Any:
     """
     Call an LLM completion API with the provided messages.

@@ -37,7 +37,7 @@ def call_llm(
         **kwargs: Additional parameters to pass to OpenAI API

     Returns:
-        The assistant's response content as a string
+        The complete message object from the OpenAI API response

     Raises:
         LLMError: If API call fails or configuration is invalid
@@ -48,12 +48,20 @@ def call_llm(
         # Get API key
         if not api_key:
             api_key = os.getenv("OPENAI_API_KEY")
+        if not api_key:
+            api_key = os.getenv("OPENROUTER_API_KEY")

         if not api_key and not base_url:
             raise LLMError(
-                "OpenAI API key not found. Set OPENAI_API_KEY environment variable or pass api_key parameter."
+                "API key not found. Set OPENROUTER_API_KEY or OPENAI_API_KEY environment variable or pass api_key parameter."
             )

+        if api_key and api_key.startswith("sk-or"):
+            base_url = "https://openrouter.ai/api/v1"
+
+        if base_url and not api_key:
+            api_key = "EMPTY"
+
         # Initialize OpenAI client
         client_kwargs = {}
         if api_key:
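The key-resolution change above falls back to OPENROUTER_API_KEY when OPENAI_API_KEY is unset, routes keys with the "sk-or" prefix to the OpenRouter endpoint automatically, and substitutes the placeholder key "EMPTY" when only a base_url is given (the usual convention for local OpenAI-compatible servers). A minimal sketch of the resulting call patterns; the model names, endpoint URL, and environment setup are illustrative, not taken from the package:

    import os
    from agentic_blocks.llm import call_llm

    # Scenario 1: OpenRouter. A key with the "sk-or" prefix now selects
    # https://openrouter.ai/api/v1 without an explicit base_url.
    os.environ["OPENROUTER_API_KEY"] = "sk-or-..."  # placeholder, not a real key
    reply = call_llm([{"role": "user", "content": "Hello"}], model="openai/gpt-4o-mini")

    # Scenario 2: local OpenAI-compatible server, with no OPENAI_API_KEY or
    # OPENROUTER_API_KEY in the environment; "EMPTY" is sent as a placeholder key.
    reply = call_llm(
        [{"role": "user", "content": "Hello"}],
        base_url="http://localhost:8000/v1",  # illustrative local endpoint
        model="my-local-model",               # illustrative model name
    )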
@@ -86,8 +94,8 @@ def call_llm(
         # Make completion request
         response = client.chat.completions.create(**completion_params)

-        # Extract and return response content
-        return response.choices[0].message.content or ""
+        # Return the complete message object
+        return response.choices[0].message

     except Exception as e:
         raise LLMError(f"Failed to call LLM API: {e}")
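Since call_llm now returns the complete message object rather than a plain string, callers that printed or stored the old return value directly need to read .content instead, and can check .tool_calls for requested tool invocations; the updated example_usage below shows the same pattern. A short migration sketch (variable names are illustrative):

    answer = call_llm(messages)

    # 0.1.4: the return value was already a string
    # print(answer)

    # 0.1.6: the text lives on the message object (content may be None for tool calls)
    print(answer.content or "")

    if answer.tool_calls:
        for tc in answer.tool_calls:
            print(tc.function.name, tc.function.arguments)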
@@ -132,12 +140,19 @@ def example_usage():
         # Call with Messages object
         print("Using Messages object:")
         response1 = call_llm(messages_obj, temperature=0.7)
-        print(f"Response: {response1}")
+        print(f"Response: {response1.content}")

         # Call with raw message list
         print("\nUsing raw message list:")
         response2 = call_llm(messages_list, tools=tools, temperature=0.5)
-        print(f"Response: {response2}")
+        if hasattr(response2, "tool_calls") and response2.tool_calls:
+            print(f"Tool calls requested: {len(response2.tool_calls)}")
+            for i, tool_call in enumerate(response2.tool_calls):
+                print(
+                    f" {i + 1}. {tool_call.function.name}({tool_call.function.arguments})"
+                )
+        else:
+            print(f"Response: {response2.content}")

     except LLMError as e:
         print(f"Error: {e}")
agentic_blocks/messages.py CHANGED
@@ -125,6 +125,35 @@ class Messages:

         self.add_tool_response(tool_call_id, content)

+    def add_response_message(self, message):
+        """
+        Add a response message (ChatCompletionMessage) to the conversation.
+
+        Args:
+            message: A ChatCompletionMessage object with role, content, and potentially tool_calls
+        """
+        # Convert the ChatCompletionMessage to a dictionary format
+        msg_dict = {"role": message.role}
+
+        # Add content (handle None case)
+        msg_dict["content"] = message.content or ""
+
+        # Add tool_calls if present
+        if message.tool_calls:
+            msg_dict["tool_calls"] = []
+            for tool_call in message.tool_calls:
+                tool_call_dict = {
+                    "id": tool_call.id,
+                    "type": tool_call.type,
+                    "function": {
+                        "name": tool_call.function.name,
+                        "arguments": tool_call.function.arguments
+                    }
+                }
+                msg_dict["tool_calls"].append(tool_call_dict)
+
+        self.messages.append(msg_dict)
+
     def get_messages(self) -> List[Dict[str, Any]]:
         """Get the current messages list."""
         return self.messages
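Combined with the llm.py changes, the new add_response_message makes a basic tool-calling loop straightforward: append the assistant message returned by call_llm, answer each requested call with add_tool_response, then ask the model again. A rough sketch under a few assumptions: call_llm and Messages are importable from the package root, a Messages instance is populated with its other helpers (not shown in this diff), and tools / run_tool stand in for the caller's own tool schema and dispatcher:

    import json
    from agentic_blocks import call_llm, Messages  # assumed package-level exports

    messages = Messages()
    # ... add system/user messages with the class's existing helpers ...

    response = call_llm(messages, tools=tools)  # tools: caller-defined schema list
    messages.add_response_message(response)     # new in 0.1.6

    if response.tool_calls:
        for tool_call in response.tool_calls:
            args = json.loads(tool_call.function.arguments)
            result = run_tool(tool_call.function.name, **args)  # hypothetical dispatcher
            messages.add_tool_response(tool_call.id, str(result))
        follow_up = call_llm(messages)
        print(follow_up.content)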
agentic_blocks-0.1.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: agentic-blocks
-Version: 0.1.4
+Version: 0.1.6
 Summary: Simple building blocks for agentic AI systems with MCP client and conversation management
 Author-email: Magnus Bjelkenhed <bjelkenhed@gmail.com>
 License: MIT
agentic_blocks-0.1.6.dist-info/RECORD ADDED
@@ -0,0 +1,9 @@
+agentic_blocks/__init__.py,sha256=LJy2tzTwX9ZjPw8dqkXOWiude7ZDDIaBIvaLC8U4d_Y,435
+agentic_blocks/llm.py,sha256=CznQ5iNFz_nQsGqjSmtZbCz1YyL6ha1qvnaoFOwsJtk,4868
+agentic_blocks/mcp_client.py,sha256=15mIN_Qw0OVNJAvfgO3jVZS4-AU4TtvEQSFDlL9ruqA,9773
+agentic_blocks/messages.py,sha256=VKfqetR0mKrOnqKxzVzqm42qEKTlK0YNytoy_Oel2Wc,9224
+agentic_blocks-0.1.6.dist-info/licenses/LICENSE,sha256=r4IcBaAjTv3-yfjXgDPuRD953Qci0Y0nQn5JfHwLyBY,1073
+agentic_blocks-0.1.6.dist-info/METADATA,sha256=62GDddF1n1iSVGjlsbJRBHdYDJ1kPjFElsHU8wWb-aw,9445
+agentic_blocks-0.1.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+agentic_blocks-0.1.6.dist-info/top_level.txt,sha256=-1a4RAemqicXLU1rRzw4QHV3KlNeQDNxVs3m2gAT238,15
+agentic_blocks-0.1.6.dist-info/RECORD,,
agentic_blocks-0.1.4.dist-info/RECORD REMOVED
@@ -1,9 +0,0 @@
-agentic_blocks/__init__.py,sha256=LJy2tzTwX9ZjPw8dqkXOWiude7ZDDIaBIvaLC8U4d_Y,435
-agentic_blocks/llm.py,sha256=uXNowV-uGq7G9igqgR-AgERhsYv_myTXFzOUeMjERc8,4253
-agentic_blocks/mcp_client.py,sha256=15mIN_Qw0OVNJAvfgO3jVZS4-AU4TtvEQSFDlL9ruqA,9773
-agentic_blocks/messages.py,sha256=rwcb_goGwfPiDIzl6Up46pAl-5Kw5aFb0uqd6pQtJPY,8162
-agentic_blocks-0.1.4.dist-info/licenses/LICENSE,sha256=r4IcBaAjTv3-yfjXgDPuRD953Qci0Y0nQn5JfHwLyBY,1073
-agentic_blocks-0.1.4.dist-info/METADATA,sha256=ec0HsLSkTeR91sqeNL8rwokCFe8VksPEcrRZ-kO-8Do,9445
-agentic_blocks-0.1.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-agentic_blocks-0.1.4.dist-info/top_level.txt,sha256=-1a4RAemqicXLU1rRzw4QHV3KlNeQDNxVs3m2gAT238,15
-agentic_blocks-0.1.4.dist-info/RECORD,,