agentic-blocks 0.1.4__tar.gz → 0.1.5__tar.gz

This diff shows the changes between publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: agentic-blocks
3
- Version: 0.1.4
3
+ Version: 0.1.5
4
4
  Summary: Simple building blocks for agentic AI systems with MCP client and conversation management
5
5
  Author-email: Magnus Bjelkenhed <bjelkenhed@gmail.com>
6
6
  License: MIT
@@ -14,7 +14,7 @@ agentic_blocks = []
14
14
 
15
15
  [project]
16
16
  name = "agentic-blocks"
17
- version = "0.1.4"
17
+ version = "0.1.5"
18
18
  description = "Simple building blocks for agentic AI systems with MCP client and conversation management"
19
19
  readme = "README.md"
20
20
  requires-python = ">=3.11"
@@ -24,7 +24,7 @@ def call_llm(
24
24
  model: str = "gpt-4o-mini",
25
25
  base_url: Optional[str] = None,
26
26
  **kwargs,
27
- ) -> str:
27
+ ) -> Any:
28
28
  """
29
29
  Call an LLM completion API with the provided messages.
30
30
 
@@ -37,7 +37,7 @@ def call_llm(
37
37
  **kwargs: Additional parameters to pass to OpenAI API
38
38
 
39
39
  Returns:
40
- The assistant's response content as a string
40
+ The complete message object from the OpenAI API response
41
41
 
42
42
  Raises:
43
43
  LLMError: If API call fails or configuration is invalid
@@ -86,8 +86,8 @@ def call_llm(
86
86
  # Make completion request
87
87
  response = client.chat.completions.create(**completion_params)
88
88
 
89
- # Extract and return response content
90
- return response.choices[0].message.content or ""
89
+ # Return the complete message object
90
+ return response.choices[0].message
91
91
 
92
92
  except Exception as e:
93
93
  raise LLMError(f"Failed to call LLM API: {e}")
@@ -132,12 +132,17 @@ def example_usage():
132
132
  # Call with Messages object
133
133
  print("Using Messages object:")
134
134
  response1 = call_llm(messages_obj, temperature=0.7)
135
- print(f"Response: {response1}")
135
+ print(f"Response: {response1.content}")
136
136
 
137
137
  # Call with raw message list
138
138
  print("\nUsing raw message list:")
139
139
  response2 = call_llm(messages_list, tools=tools, temperature=0.5)
140
- print(f"Response: {response2}")
140
+ if hasattr(response2, 'tool_calls') and response2.tool_calls:
141
+ print(f"Tool calls requested: {len(response2.tool_calls)}")
142
+ for i, tool_call in enumerate(response2.tool_calls):
143
+ print(f" {i+1}. {tool_call.function.name}({tool_call.function.arguments})")
144
+ else:
145
+ print(f"Response: {response2.content}")
141
146
 
142
147
  except LLMError as e:
143
148
  print(f"Error: {e}")
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: agentic-blocks
3
- Version: 0.1.4
3
+ Version: 0.1.5
4
4
  Summary: Simple building blocks for agentic AI systems with MCP client and conversation management
5
5
  Author-email: Magnus Bjelkenhed <bjelkenhed@gmail.com>
6
6
  License: MIT
File without changes
File without changes
File without changes