agentic-blocks 0.1.1__tar.gz → 0.1.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {agentic_blocks-0.1.1/src/agentic_blocks.egg-info → agentic_blocks-0.1.3}/PKG-INFO +87 -8
- {agentic_blocks-0.1.1 → agentic_blocks-0.1.3}/README.md +86 -7
- {agentic_blocks-0.1.1 → agentic_blocks-0.1.3}/pyproject.toml +1 -1
- {agentic_blocks-0.1.1 → agentic_blocks-0.1.3}/src/agentic_blocks/__init__.py +2 -1
- agentic_blocks-0.1.3/src/agentic_blocks/llm.py +140 -0
- {agentic_blocks-0.1.1 → agentic_blocks-0.1.3/src/agentic_blocks.egg-info}/PKG-INFO +87 -8
- {agentic_blocks-0.1.1 → agentic_blocks-0.1.3}/src/agentic_blocks.egg-info/SOURCES.txt +1 -0
- {agentic_blocks-0.1.1 → agentic_blocks-0.1.3}/LICENSE +0 -0
- {agentic_blocks-0.1.1 → agentic_blocks-0.1.3}/setup.cfg +0 -0
- {agentic_blocks-0.1.1 → agentic_blocks-0.1.3}/src/agentic_blocks/mcp_client.py +0 -0
- {agentic_blocks-0.1.1 → agentic_blocks-0.1.3}/src/agentic_blocks/messages.py +0 -0
- {agentic_blocks-0.1.1 → agentic_blocks-0.1.3}/src/agentic_blocks.egg-info/dependency_links.txt +0 -0
- {agentic_blocks-0.1.1 → agentic_blocks-0.1.3}/src/agentic_blocks.egg-info/requires.txt +0 -0
- {agentic_blocks-0.1.1 → agentic_blocks-0.1.3}/src/agentic_blocks.egg-info/top_level.txt +0 -0
--- agentic_blocks-0.1.1/src/agentic_blocks.egg-info/PKG-INFO
+++ agentic_blocks-0.1.3/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: agentic-blocks
-Version: 0.1.1
+Version: 0.1.3
 Summary: Simple building blocks for agentic AI systems with MCP client and conversation management
 Author-email: Magnus Bjelkenhed <bjelkenhed@gmail.com>
 License: MIT
@@ -41,8 +41,9 @@ Agentic Blocks provides clean, simple components for building AI agent systems,

 - **MCP Client**: Connect to Model Control Protocol (MCP) endpoints with a sync-by-default API
 - **Messages**: Manage LLM conversation history with OpenAI-compatible format
+- **LLM**: Simple function for calling OpenAI-compatible completion APIs

-
+All components follow principles of simplicity, maintainability, and ease of use.

 ## Installation

@@ -123,10 +124,73 @@ conversation = messages.get_messages()
 print(messages)
 ```

-
+### LLM - Call OpenAI-Compatible APIs
+
+The `call_llm` function provides a simple interface for calling LLM completion APIs:
+
+```python
+from agentic_blocks import call_llm, Messages
+
+# Method 1: Using with Messages object
+messages = Messages(
+    system_prompt="You are a helpful assistant.",
+    user_prompt="What is the capital of France?"
+)
+
+response = call_llm(messages, temperature=0.7)
+print(response)  # "The capital of France is Paris."
+```
+
+```python
+# Method 2: Using with raw message list
+messages_list = [
+    {"role": "system", "content": "You are a helpful assistant."},
+    {"role": "user", "content": "What is 2+2?"}
+]
+
+response = call_llm(messages_list, model="gpt-4o-mini")
+print(response)  # "2+2 equals 4."
+```

 ```python
-from agentic_blocks import MCPClient, Messages
+# Method 3: Using with tools (for function calling)
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "get_weather",
+            "description": "Get current weather for a location",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {"type": "string", "description": "City name"}
+                },
+                "required": ["location"]
+            }
+        }
+    }
+]
+
+messages = Messages(user_prompt="What's the weather like in Stockholm?")
+response = call_llm(messages, tools=tools)
+print(response)
+```
+
+**Environment Setup:**
+Create a `.env` file in your project root:
+```
+OPENAI_API_KEY=your_api_key_here
+```
+
+Or pass the API key directly:
+```python
+response = call_llm(messages, api_key="your_api_key_here")
+```
+
+## Complete Example - Agent with MCP Tools and LLM
+
+```python
+from agentic_blocks import MCPClient, Messages, call_llm

 def simple_agent():
     # Initialize MCP client and conversation
@@ -170,10 +234,10 @@ def simple_agent():
     response_text = result["content"][0]["text"]
     messages.add_tool_response("search_001", response_text)

-    # …
-    messages.…
-    …
-    )
+    # Use LLM to generate response based on search results
+    messages.add_user_message("Based on the search results, please summarize the key AI news.")
+    llm_response = call_llm(messages, temperature=0.7)
+    messages.add_assistant_message(llm_response)

     # Print conversation
     print("\nConversation:")
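The four replacement lines above follow the library's basic turn pattern: append a user message, request a completion with `call_llm`, and record the result with `add_assistant_message`. A minimal sketch of that pattern in isolation (the follow-up prompts are illustrative, not from the package):

```python
from agentic_blocks import Messages, call_llm

messages = Messages(user_prompt="Summarize the key AI news.")

# Ask, complete, record: the same three-step turn used in simple_agent().
for follow_up in ["Pick the single most important story.", "Why does it matter?"]:
    reply = call_llm(messages, temperature=0.7)
    messages.add_assistant_message(reply)
    messages.add_user_message(follow_up)

print(messages.get_messages())
```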
@@ -224,6 +288,21 @@ Messages(system_prompt=None, user_prompt=None, add_date_and_time=False)
 - `get_messages() -> List[Dict]`: Get all messages
 - `has_pending_tool_calls() -> bool`: Check for pending tool calls

+### call_llm
+
+```python
+call_llm(messages, tools=None, api_key=None, model="gpt-4o-mini", **kwargs) -> str
+```
+
+**Parameters:**
+- `messages`: Either a `Messages` instance or list of message dictionaries
+- `tools`: Optional list of tools in OpenAI function calling format
+- `api_key`: OpenAI API key (defaults to OPENAI_API_KEY from .env)
+- `model`: Model name to use for completion
+- `**kwargs`: Additional parameters passed to OpenAI API (temperature, max_tokens, etc.)
+
+**Returns:** The assistant's response content as a string
+
 ## Requirements

 - Python >= 3.11
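Since `**kwargs` is forwarded verbatim to the underlying completion request, standard OpenAI sampling parameters can be tuned per call. A short sketch (the parameter values are illustrative):

```python
from agentic_blocks import call_llm

messages = [{"role": "user", "content": "Give me one sentence about Paris."}]

# temperature and max_tokens pass through **kwargs to the completions API.
response = call_llm(
    messages,
    model="gpt-4o-mini",
    temperature=0.2,  # lower values make the output more deterministic
    max_tokens=50,    # cap the completion length
)
print(response)
```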
--- agentic_blocks-0.1.1/README.md
+++ agentic_blocks-0.1.3/README.md
@@ -8,8 +8,9 @@ Agentic Blocks provides clean, simple components for building AI agent systems,

 - **MCP Client**: Connect to Model Control Protocol (MCP) endpoints with a sync-by-default API
 - **Messages**: Manage LLM conversation history with OpenAI-compatible format
+- **LLM**: Simple function for calling OpenAI-compatible completion APIs

-
+All components follow principles of simplicity, maintainability, and ease of use.

 ## Installation

@@ -90,10 +91,73 @@ conversation = messages.get_messages()
 print(messages)
 ```

-
+### LLM - Call OpenAI-Compatible APIs
+
+The `call_llm` function provides a simple interface for calling LLM completion APIs:
+
+```python
+from agentic_blocks import call_llm, Messages
+
+# Method 1: Using with Messages object
+messages = Messages(
+    system_prompt="You are a helpful assistant.",
+    user_prompt="What is the capital of France?"
+)
+
+response = call_llm(messages, temperature=0.7)
+print(response)  # "The capital of France is Paris."
+```
+
+```python
+# Method 2: Using with raw message list
+messages_list = [
+    {"role": "system", "content": "You are a helpful assistant."},
+    {"role": "user", "content": "What is 2+2?"}
+]
+
+response = call_llm(messages_list, model="gpt-4o-mini")
+print(response)  # "2+2 equals 4."
+```

 ```python
-from agentic_blocks import MCPClient, Messages
+# Method 3: Using with tools (for function calling)
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "get_weather",
+            "description": "Get current weather for a location",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {"type": "string", "description": "City name"}
+                },
+                "required": ["location"]
+            }
+        }
+    }
+]
+
+messages = Messages(user_prompt="What's the weather like in Stockholm?")
+response = call_llm(messages, tools=tools)
+print(response)
+```
+
+**Environment Setup:**
+Create a `.env` file in your project root:
+```
+OPENAI_API_KEY=your_api_key_here
+```
+
+Or pass the API key directly:
+```python
+response = call_llm(messages, api_key="your_api_key_here")
+```
+
+## Complete Example - Agent with MCP Tools and LLM
+
+```python
+from agentic_blocks import MCPClient, Messages, call_llm

 def simple_agent():
     # Initialize MCP client and conversation
@@ -137,10 +201,10 @@ def simple_agent():
     response_text = result["content"][0]["text"]
     messages.add_tool_response("search_001", response_text)

-    # …
-    messages.…
-    …
-    )
+    # Use LLM to generate response based on search results
+    messages.add_user_message("Based on the search results, please summarize the key AI news.")
+    llm_response = call_llm(messages, temperature=0.7)
+    messages.add_assistant_message(llm_response)

     # Print conversation
     print("\nConversation:")
@@ -191,6 +255,21 @@ Messages(system_prompt=None, user_prompt=None, add_date_and_time=False)
 - `get_messages() -> List[Dict]`: Get all messages
 - `has_pending_tool_calls() -> bool`: Check for pending tool calls

+### call_llm
+
+```python
+call_llm(messages, tools=None, api_key=None, model="gpt-4o-mini", **kwargs) -> str
+```
+
+**Parameters:**
+- `messages`: Either a `Messages` instance or list of message dictionaries
+- `tools`: Optional list of tools in OpenAI function calling format
+- `api_key`: OpenAI API key (defaults to OPENAI_API_KEY from .env)
+- `model`: Model name to use for completion
+- `**kwargs`: Additional parameters passed to OpenAI API (temperature, max_tokens, etc.)
+
+**Returns:** The assistant's response content as a string
+
 ## Requirements

 - Python >= 3.11
--- agentic_blocks-0.1.1/pyproject.toml
+++ agentic_blocks-0.1.3/pyproject.toml
@@ -14,7 +14,7 @@ agentic_blocks = []

 [project]
 name = "agentic-blocks"
-version = "0.1.1"
+version = "0.1.3"
 description = "Simple building blocks for agentic AI systems with MCP client and conversation management"
 readme = "README.md"
 requires-python = ">=3.11"
--- agentic_blocks-0.1.1/src/agentic_blocks/__init__.py
+++ agentic_blocks-0.1.3/src/agentic_blocks/__init__.py
@@ -2,6 +2,7 @@

 from .mcp_client import MCPClient, MCPEndpointError
 from .messages import Messages
+from .llm import call_llm, LLMError

 # Get version from package metadata
 try:
@@ -10,4 +11,4 @@ try:
 except Exception:
     __version__ = "unknown"

-__all__ = ["MCPClient", "MCPEndpointError", "Messages"]
+__all__ = ["MCPClient", "MCPEndpointError", "Messages", "call_llm", "LLMError"]
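After this change the package exports five names. A minimal smoke test of the 0.1.3 import surface (assumes the package is installed and an API key is configured):

```python
# The full public API of agentic-blocks 0.1.3, per the updated __all__.
from agentic_blocks import MCPClient, MCPEndpointError, Messages, call_llm, LLMError

# call_llm wraps missing-key and transport failures in the package-level
# LLMError, so one except clause covers both.
try:
    print(call_llm([{"role": "user", "content": "ping"}]))
except LLMError as e:
    print(f"LLM call failed: {e}")
```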
--- /dev/null
+++ agentic_blocks-0.1.3/src/agentic_blocks/llm.py
@@ -0,0 +1,140 @@
+"""
+Simplified LLM client for calling completion APIs.
+"""
+
+import os
+from typing import List, Dict, Any, Optional, Union
+
+from dotenv import load_dotenv
+from openai import OpenAI
+
+from agentic_blocks.messages import Messages
+
+
+class LLMError(Exception):
+    """Exception raised when there's an error calling the LLM API."""
+
+    pass
+
+
+def call_llm(
+    messages: Union[Messages, List[Dict[str, Any]]],
+    tools: Optional[List[Dict[str, Any]]] = None,
+    api_key: Optional[str] = None,
+    model: str = "gpt-4o-mini",
+    **kwargs,
+) -> str:
+    """
+    Call an LLM completion API with the provided messages.
+
+    Args:
+        messages: Either a Messages instance or a list of message dicts
+        tools: Optional list of tools in OpenAI function calling format
+        api_key: OpenAI API key (if not provided, loads from .env OPENAI_API_KEY)
+        model: Model name to use for completion
+        **kwargs: Additional parameters to pass to OpenAI API
+
+    Returns:
+        The assistant's response content as a string
+
+    Raises:
+        LLMError: If API call fails or configuration is invalid
+    """
+    # Load environment variables
+    load_dotenv()
+
+    # Get API key
+    if not api_key:
+        api_key = os.getenv("OPENAI_API_KEY")
+
+    if not api_key:
+        raise LLMError(
+            "OpenAI API key not found. Set OPENAI_API_KEY environment variable or pass api_key parameter."
+        )
+
+    # Initialize OpenAI client
+    client = OpenAI(api_key=api_key)
+
+    # Handle different message input types
+    if isinstance(messages, Messages):
+        conversation_messages = messages.get_messages()
+    else:
+        conversation_messages = messages
+
+    if not conversation_messages:
+        raise LLMError("No messages provided for completion.")
+
+    try:
+        # Prepare completion parameters
+        completion_params = {
+            "model": model,
+            "messages": conversation_messages,
+            **kwargs,
+        }
+
+        if tools:
+            completion_params["tools"] = tools
+            completion_params["tool_choice"] = "auto"
+
+        # Make completion request
+        response = client.chat.completions.create(**completion_params)
+
+        # Extract and return response content
+        return response.choices[0].message.content or ""
+
+    except Exception as e:
+        raise LLMError(f"Failed to call LLM API: {e}")
+
+
+def example_usage():
+    """Example of how to use the call_llm function."""
+    # Example 1: Using with Messages object
+    messages_obj = Messages(
+        system_prompt="You are a helpful assistant.",
+        user_prompt="What is the capital of France?",
+    )
+
+    # Example 2: Using with raw message list
+    messages_list = [
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": "What is the capital of France?"},
+    ]
+
+    # Example tools
+    tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "get_weather",
+                "description": "Get current weather for a location",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "location": {
+                            "type": "string",
+                            "description": "City and state, e.g. San Francisco, CA",
+                        }
+                    },
+                    "required": ["location"],
+                },
+            },
+        }
+    ]
+
+    try:
+        # Call with Messages object
+        print("Using Messages object:")
+        response1 = call_llm(messages_obj, temperature=0.7)
+        print(f"Response: {response1}")
+
+        # Call with raw message list
+        print("\nUsing raw message list:")
+        response2 = call_llm(messages_list, tools=tools, temperature=0.5)
+        print(f"Response: {response2}")
+
+    except LLMError as e:
+        print(f"Error: {e}")
+
+
+if __name__ == "__main__":
+    example_usage()
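One behavior of the new module worth noting: `call_llm` returns `message.content or ""`, so when a tools-enabled request is answered with a tool call instead of text, the function returns an empty string and the tool-call payload is not surfaced. A sketch of how a caller might detect that case (the tool schema repeats the README's `get_weather` example):

```python
from agentic_blocks import Messages, call_llm

# Same tool schema as the README's get_weather example.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get current weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string", "description": "City name"}
                },
                "required": ["location"],
            },
        },
    }
]

messages = Messages(user_prompt="What's the weather like in Stockholm?")
response = call_llm(messages, tools=tools)

# An empty string usually means the model issued a tool call, which
# call_llm 0.1.3 does not expose to the caller.
if not response:
    print("Model likely requested a tool call; it is not returned by call_llm.")
else:
    print(response)
```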
agentic_blocks-0.1.1/PKG-INFO → agentic_blocks-0.1.3/src/agentic_blocks.egg-info/PKG-INFO
Identical to the PKG-INFO diff above (same five hunks); the egg-info copy is regenerated from the package metadata.
All remaining files are renamed with no content changes: LICENSE, setup.cfg, src/agentic_blocks/mcp_client.py, src/agentic_blocks/messages.py, and the egg-info dependency_links.txt, requires.txt, and top_level.txt.