olbrain-python-sdk 0.2.1__tar.gz → 0.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. {olbrain_python_sdk-0.2.1/olbrain_python_sdk.egg-info → olbrain_python_sdk-0.3.0}/PKG-INFO +100 -52
  2. olbrain_python_sdk-0.3.0/README.md +251 -0
  3. olbrain_python_sdk-0.3.0/examples/basic_usage.py +149 -0
  4. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/examples/session_management.py +16 -0
  5. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/examples/streaming_responses.py +14 -0
  6. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/olbrain/__init__.py +9 -10
  7. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/olbrain/client.py +174 -182
  8. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/olbrain/session.py +12 -6
  9. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/olbrain/streaming.py +11 -0
  10. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0/olbrain_python_sdk.egg-info}/PKG-INFO +100 -52
  11. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/setup.py +3 -3
  12. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/test_agent.py +2 -2
  13. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/tests/test_client.py +60 -40
  14. olbrain_python_sdk-0.3.0/tests/test_session.py +112 -0
  15. olbrain_python_sdk-0.2.1/README.md +0 -203
  16. olbrain_python_sdk-0.2.1/examples/basic_usage.py +0 -163
  17. olbrain_python_sdk-0.2.1/tests/test_session.py +0 -89
  18. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/.gitignore +0 -0
  19. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/LICENSE +0 -0
  20. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/MANIFEST.in +0 -0
  21. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/examples/advanced_features.py +0 -0
  22. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/examples/error_handling.py +0 -0
  23. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/olbrain/exceptions.py +0 -0
  24. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/olbrain/utils.py +0 -0
  25. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/olbrain_python_sdk.egg-info/SOURCES.txt +0 -0
  26. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/olbrain_python_sdk.egg-info/dependency_links.txt +0 -0
  27. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/olbrain_python_sdk.egg-info/not-zip-safe +0 -0
  28. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/olbrain_python_sdk.egg-info/requires.txt +0 -0
  29. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/olbrain_python_sdk.egg-info/top_level.txt +0 -0
  30. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/pyproject.toml +0 -0
  31. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/requirements.txt +0 -0
  32. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/setup.cfg +0 -0
  33. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/tests/__init__.py +0 -0
  34. {olbrain_python_sdk-0.2.1 → olbrain_python_sdk-0.3.0}/tests/test_exceptions.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: olbrain-python-sdk
3
- Version: 0.2.1
3
+ Version: 0.3.0
4
4
  Summary: Official Python SDK for Olbrain AI agents
5
5
  Home-page: https://github.com/Olbrain/olbrain-python-sdk
6
6
  Author: Olbrain Team
@@ -63,7 +63,7 @@ pip install olbrain-python-sdk
63
63
  ## Quick Start
64
64
 
65
65
  ```python
66
- from olbrain import AgentClient
66
+ from olbrain import AgentClient, ChatResponse
67
67
 
68
68
  # Initialize client
69
69
  client = AgentClient(
@@ -71,82 +71,106 @@ client = AgentClient(
71
71
  api_key="sk_live_your_api_key"
72
72
  )
73
73
 
74
- # Create a session and send a message
74
+ # Create a session
75
75
  session_id = client.create_session(title="My Chat")
76
- response = client.send_and_wait(session_id, "Hello!")
77
76
 
77
+ # Send a message
78
+ response_data = client.send(session_id, "Hello!")
79
+
80
+ # Parse response
81
+ response = ChatResponse.from_dict(response_data, session_id)
78
82
  print(response.text)
83
+ print(f"Cost: ${response.cost:.6f}")
84
+
79
85
  client.close()
80
86
  ```
81
87
 
82
88
  ## Features
83
89
 
84
90
  - **Simple API** - Just `agent_id` and `api_key` to get started
85
- - **Session Management** - Create, update, archive sessions with metadata
86
- - **Sync & Streaming** - Both request-response and real-time streaming
91
+ - **Session-based Conversations** - Maintain conversation context across messages
92
+ - **Synchronous & Async** - Both sync responses and async webhook patterns
87
93
  - **Token Tracking** - Monitor usage and costs per request
88
94
  - **Model Override** - Switch models per-message
89
95
  - **Error Handling** - Comprehensive exception hierarchy
90
96
 
91
97
  ## Usage
92
98
 
93
- ### Synchronous Messaging
99
+ ### Basic Messaging
94
100
 
95
101
  ```python
96
- from olbrain import AgentClient
102
+ from olbrain import AgentClient, ChatResponse
97
103
 
98
104
  with AgentClient(agent_id="your-agent-id", api_key="sk_live_your_key") as client:
99
- session_id = client.create_session()
100
- response = client.send_and_wait(session_id, "What is Python?")
101
-
105
+ # Create session
106
+ session_id = client.create_session(
107
+ title="Support Chat",
108
+ user_id="user-123",
109
+ mode="production"
110
+ )
111
+
112
+ # Send message and get response
113
+ response_data = client.send(session_id, "What is Python?")
114
+
115
+ # Parse response
116
+ response = ChatResponse.from_dict(response_data, session_id)
102
117
  print(response.text)
103
118
  print(f"Tokens: {response.token_usage.total_tokens}")
119
+ print(f"Model: {response.model}")
120
+ print(f"Cost: ${response.cost:.6f}")
104
121
  ```
105
122
 
106
- ### Real-Time Streaming
123
+ ### Continuing a Conversation
107
124
 
108
125
  ```python
109
- from olbrain import AgentClient
126
+ # Send multiple messages in the same session
127
+ session_id = client.create_session(title="Q&A Session")
110
128
 
111
- client = AgentClient(agent_id="your-agent-id", api_key="sk_live_your_key")
129
+ # First message
130
+ response1 = client.send(session_id, "What is machine learning?")
131
+ print(ChatResponse.from_dict(response1, session_id).text)
112
132
 
113
- def on_message(msg):
114
- print(f"[{msg['role']}]: {msg['content']}")
115
-
116
- session_id = client.create_session(on_message=on_message)
117
- client.send(session_id, "Tell me a story")
118
- client.run() # Blocks and processes messages
133
+ # Follow-up message - agent remembers context
134
+ response2 = client.send(session_id, "Can you give me an example?")
135
+ print(ChatResponse.from_dict(response2, session_id).text)
119
136
  ```
120
137
 
121
- ### Session Management
138
+ ### Model Override
122
139
 
123
140
  ```python
124
- # Create session with metadata
125
- session_id = client.create_session(
126
- title="Support Chat",
127
- user_id="user-123",
128
- metadata={"source": "web"},
129
- mode="production"
141
+ # Use a specific model for a message
142
+ response_data = client.send(
143
+ session_id,
144
+ "Complex question here",
145
+ model="gpt-4o" # Override default model
130
146
  )
131
147
 
132
- # Get session info
133
- info = client.get_session(session_id)
134
- print(f"Messages: {info.message_count}")
148
+ response = ChatResponse.from_dict(response_data, session_id)
149
+ print(f"Used model: {response.model}")
150
+ ```
135
151
 
136
- # Get message history
137
- messages = client.get_messages(session_id, limit=20)
152
+ ### Async Webhook Pattern
138
153
 
139
- # Archive session
140
- client.delete_session(session_id)
154
+ ```python
155
+ # Send message with async processing
156
+ # Response will be delivered to your webhook URL
157
+ result = client.send_async(
158
+ session_id="existing-session",
159
+ message="Process this in the background",
160
+ webhook_url="https://your-app.com/webhook"
161
+ )
162
+
163
+ print(f"Message queued: {result['success']}")
141
164
  ```
142
165
 
143
- ### Model Override
166
+ ### Message Metadata
144
167
 
145
168
  ```python
146
- response = client.send_and_wait(
169
+ # Include custom metadata with messages
170
+ response_data = client.send(
147
171
  session_id,
148
- "Complex question here",
149
- model="gpt-4" # Override default model
172
+ "Tell me a joke",
173
+ metadata={"category": "humor", "source": "example"}
150
174
  )
151
175
  ```
152
176
 
@@ -156,18 +180,20 @@ response = client.send_and_wait(
156
180
  from olbrain import AgentClient
157
181
  from olbrain.exceptions import (
158
182
  AuthenticationError,
159
- SessionNotFoundError,
160
183
  RateLimitError,
184
+ NetworkError,
161
185
  OlbrainError
162
186
  )
163
187
 
164
188
  try:
165
189
  client = AgentClient(agent_id="...", api_key="...")
166
- response = client.send_and_wait(session_id, "Hello")
190
+ response = client.send(session_id, "Hello")
167
191
  except AuthenticationError:
168
192
  print("Invalid API key")
169
193
  except RateLimitError as e:
170
194
  print(f"Rate limited. Retry after {e.retry_after}s")
195
+ except NetworkError as e:
196
+ print(f"Network error: {e}")
171
197
  except OlbrainError as e:
172
198
  print(f"Error: {e}")
173
199
  ```
@@ -195,22 +221,35 @@ logging.basicConfig(level=logging.DEBUG)
195
221
  | Method | Description |
196
222
  |--------|-------------|
197
223
  | `create_session()` | Create a new chat session |
198
- | `send(session_id, message)` | Send message (async, use callback) |
199
- | `send_and_wait(session_id, message)` | Send message and wait for response |
200
- | `get_session(session_id)` | Get session details |
201
- | `update_session(session_id, ...)` | Update session title/metadata |
202
- | `delete_session(session_id)` | Archive a session |
203
- | `get_messages(session_id)` | Get message history |
204
- | `get_session_stats(session_id)` | Get token usage stats |
224
+ | `send(session_id, message, ...)` | Send message and get response (sync) |
225
+ | `send_async(session_id, message, webhook_url, ...)` | Send message for async processing |
205
226
  | `close()` | Clean up resources |
206
227
 
228
+ #### Deprecated Methods (v0.3.0+)
229
+
230
+ The following methods are deprecated and raise `NotImplementedError`:
231
+ - `send_and_wait()` - Use `send()` instead
232
+ - `get_session()` - Not supported by webhook API
233
+ - `update_session()` - Not supported by webhook API
234
+ - `delete_session()` - Not supported by webhook API
235
+ - `get_messages()` - Not supported by webhook API
236
+ - `get_session_stats()` - Not supported by webhook API
237
+ - `listen()` - SSE streaming not supported
238
+ - `run()` - SSE streaming not supported
239
+
207
240
  ### Response Objects
208
241
 
209
242
  **ChatResponse**
210
243
  - `text` - Response text
244
+ - `session_id` - Session identifier
211
245
  - `success` - Success status
212
246
  - `token_usage` - TokenUsage object
213
- - `model_used` - Model that generated response
247
+ - `model` - Model that generated response
248
+ - `processing_time_ms` - Processing time in milliseconds
249
+ - `cost` - Cost in USD
250
+ - `mode` - Response mode ("sync" or "session_created")
251
+ - `metadata` - Optional metadata
252
+ - `error` - Error message if failed
214
253
 
215
254
  **TokenUsage**
216
255
  - `prompt_tokens` - Input tokens
@@ -228,18 +267,27 @@ logging.basicConfig(level=logging.DEBUG)
228
267
  | `RateLimitError` | Rate limit exceeded |
229
268
  | `NetworkError` | Connection issues |
230
269
  | `ValidationError` | Invalid input |
231
- | `StreamingError` | Streaming error |
270
+
271
+ ## Migration from v0.2.x
272
+
273
+ See [MIGRATION.md](MIGRATION.md) for detailed migration guide from v0.2.x to v0.3.0.
274
+
275
+ Major changes in v0.3.0:
276
+ - Removed SSE streaming (use sync or async webhook patterns)
277
+ - Removed session management endpoints (get/update/delete)
278
+ - Removed message history retrieval
279
+ - Updated response schema field names (`model_used` → `model`, etc.)
232
280
 
233
281
  ## Examples
234
282
 
235
283
  See the [examples/](examples/) directory:
236
284
 
237
- - `basic_usage.py` - Core SDK features
238
- - `session_management.py` - Session CRUD operations
239
- - `streaming_responses.py` - Real-time streaming
285
+ - `basic_usage.py` - Core SDK features (current API)
240
286
  - `error_handling.py` - Error handling patterns
241
287
  - `advanced_features.py` - Advanced usage
242
288
 
289
+ **Note:** `streaming_responses.py` and `session_management.py` are deprecated and kept for reference only.
290
+
243
291
  ## License
244
292
 
245
293
  MIT License - see [LICENSE](LICENSE)
@@ -0,0 +1,251 @@
1
+ # Olbrain Python SDK
2
+
3
+ [![PyPI version](https://badge.fury.io/py/olbrain-python-sdk.svg)](https://pypi.org/project/olbrain-python-sdk/)
4
+ [![Python Support](https://img.shields.io/pypi/pyversions/olbrain-python-sdk.svg)](https://pypi.org/project/olbrain-python-sdk/)
5
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
6
+
7
+ Official Python SDK for integrating Olbrain AI agents into your applications.
8
+
9
+ ## Installation
10
+
11
+ ```bash
12
+ pip install olbrain-python-sdk
13
+ ```
14
+
15
+ ## Quick Start
16
+
17
+ ```python
18
+ from olbrain import AgentClient, ChatResponse
19
+
20
+ # Initialize client
21
+ client = AgentClient(
22
+ agent_id="your-agent-id",
23
+ api_key="sk_live_your_api_key"
24
+ )
25
+
26
+ # Create a session
27
+ session_id = client.create_session(title="My Chat")
28
+
29
+ # Send a message
30
+ response_data = client.send(session_id, "Hello!")
31
+
32
+ # Parse response
33
+ response = ChatResponse.from_dict(response_data, session_id)
34
+ print(response.text)
35
+ print(f"Cost: ${response.cost:.6f}")
36
+
37
+ client.close()
38
+ ```
39
+
40
+ ## Features
41
+
42
+ - **Simple API** - Just `agent_id` and `api_key` to get started
43
+ - **Session-based Conversations** - Maintain conversation context across messages
44
+ - **Synchronous & Async** - Both sync responses and async webhook patterns
45
+ - **Token Tracking** - Monitor usage and costs per request
46
+ - **Model Override** - Switch models per-message
47
+ - **Error Handling** - Comprehensive exception hierarchy
48
+
49
+ ## Usage
50
+
51
+ ### Basic Messaging
52
+
53
+ ```python
54
+ from olbrain import AgentClient, ChatResponse
55
+
56
+ with AgentClient(agent_id="your-agent-id", api_key="sk_live_your_key") as client:
57
+ # Create session
58
+ session_id = client.create_session(
59
+ title="Support Chat",
60
+ user_id="user-123",
61
+ mode="production"
62
+ )
63
+
64
+ # Send message and get response
65
+ response_data = client.send(session_id, "What is Python?")
66
+
67
+ # Parse response
68
+ response = ChatResponse.from_dict(response_data, session_id)
69
+ print(response.text)
70
+ print(f"Tokens: {response.token_usage.total_tokens}")
71
+ print(f"Model: {response.model}")
72
+ print(f"Cost: ${response.cost:.6f}")
73
+ ```
74
+
75
+ ### Continuing a Conversation
76
+
77
+ ```python
78
+ # Send multiple messages in the same session
79
+ session_id = client.create_session(title="Q&A Session")
80
+
81
+ # First message
82
+ response1 = client.send(session_id, "What is machine learning?")
83
+ print(ChatResponse.from_dict(response1, session_id).text)
84
+
85
+ # Follow-up message - agent remembers context
86
+ response2 = client.send(session_id, "Can you give me an example?")
87
+ print(ChatResponse.from_dict(response2, session_id).text)
88
+ ```
89
+
90
+ ### Model Override
91
+
92
+ ```python
93
+ # Use a specific model for a message
94
+ response_data = client.send(
95
+ session_id,
96
+ "Complex question here",
97
+ model="gpt-4o" # Override default model
98
+ )
99
+
100
+ response = ChatResponse.from_dict(response_data, session_id)
101
+ print(f"Used model: {response.model}")
102
+ ```
103
+
104
+ ### Async Webhook Pattern
105
+
106
+ ```python
107
+ # Send message with async processing
108
+ # Response will be delivered to your webhook URL
109
+ result = client.send_async(
110
+ session_id="existing-session",
111
+ message="Process this in the background",
112
+ webhook_url="https://your-app.com/webhook"
113
+ )
114
+
115
+ print(f"Message queued: {result['success']}")
116
+ ```
117
+
118
+ ### Message Metadata
119
+
120
+ ```python
121
+ # Include custom metadata with messages
122
+ response_data = client.send(
123
+ session_id,
124
+ "Tell me a joke",
125
+ metadata={"category": "humor", "source": "example"}
126
+ )
127
+ ```
128
+
129
+ ### Error Handling
130
+
131
+ ```python
132
+ from olbrain import AgentClient
133
+ from olbrain.exceptions import (
134
+ AuthenticationError,
135
+ RateLimitError,
136
+ NetworkError,
137
+ OlbrainError
138
+ )
139
+
140
+ try:
141
+ client = AgentClient(agent_id="...", api_key="...")
142
+ response = client.send(session_id, "Hello")
143
+ except AuthenticationError:
144
+ print("Invalid API key")
145
+ except RateLimitError as e:
146
+ print(f"Rate limited. Retry after {e.retry_after}s")
147
+ except NetworkError as e:
148
+ print(f"Network error: {e}")
149
+ except OlbrainError as e:
150
+ print(f"Error: {e}")
151
+ ```
152
+
153
+ ## Configuration
154
+
155
+ ### Environment Variables
156
+
157
+ ```bash
158
+ export OLBRAIN_API_KEY="sk_live_your_api_key"
159
+ export OLBRAIN_AGENT_ID="your-agent-id"
160
+ ```
161
+
162
+ ### Logging
163
+
164
+ ```python
165
+ import logging
166
+ logging.basicConfig(level=logging.DEBUG)
167
+ ```
168
+
169
+ ## API Reference
170
+
171
+ ### AgentClient
172
+
173
+ | Method | Description |
174
+ |--------|-------------|
175
+ | `create_session()` | Create a new chat session |
176
+ | `send(session_id, message, ...)` | Send message and get response (sync) |
177
+ | `send_async(session_id, message, webhook_url, ...)` | Send message for async processing |
178
+ | `close()` | Clean up resources |
179
+
180
+ #### Deprecated Methods (v0.3.0+)
181
+
182
+ The following methods are deprecated and raise `NotImplementedError`:
183
+ - `send_and_wait()` - Use `send()` instead
184
+ - `get_session()` - Not supported by webhook API
185
+ - `update_session()` - Not supported by webhook API
186
+ - `delete_session()` - Not supported by webhook API
187
+ - `get_messages()` - Not supported by webhook API
188
+ - `get_session_stats()` - Not supported by webhook API
189
+ - `listen()` - SSE streaming not supported
190
+ - `run()` - SSE streaming not supported
191
+
192
+ ### Response Objects
193
+
194
+ **ChatResponse**
195
+ - `text` - Response text
196
+ - `session_id` - Session identifier
197
+ - `success` - Success status
198
+ - `token_usage` - TokenUsage object
199
+ - `model` - Model that generated response
200
+ - `processing_time_ms` - Processing time in milliseconds
201
+ - `cost` - Cost in USD
202
+ - `mode` - Response mode ("sync" or "session_created")
203
+ - `metadata` - Optional metadata
204
+ - `error` - Error message if failed
205
+
206
+ **TokenUsage**
207
+ - `prompt_tokens` - Input tokens
208
+ - `completion_tokens` - Output tokens
209
+ - `total_tokens` - Total tokens
210
+ - `cost` - Cost in USD
211
+
212
+ ### Exceptions
213
+
214
+ | Exception | Description |
215
+ |-----------|-------------|
216
+ | `OlbrainError` | Base exception |
217
+ | `AuthenticationError` | Invalid API key |
218
+ | `SessionNotFoundError` | Session not found |
219
+ | `RateLimitError` | Rate limit exceeded |
220
+ | `NetworkError` | Connection issues |
221
+ | `ValidationError` | Invalid input |
222
+
223
+ ## Migration from v0.2.x
224
+
225
+ See [MIGRATION.md](MIGRATION.md) for detailed migration guide from v0.2.x to v0.3.0.
226
+
227
+ Major changes in v0.3.0:
228
+ - Removed SSE streaming (use sync or async webhook patterns)
229
+ - Removed session management endpoints (get/update/delete)
230
+ - Removed message history retrieval
231
+ - Updated response schema field names (`model_used` → `model`, etc.)
232
+
233
+ ## Examples
234
+
235
+ See the [examples/](examples/) directory:
236
+
237
+ - `basic_usage.py` - Core SDK features (current API)
238
+ - `error_handling.py` - Error handling patterns
239
+ - `advanced_features.py` - Advanced usage
240
+
241
+ **Note:** `streaming_responses.py` and `session_management.py` are deprecated and kept for reference only.
242
+
243
+ ## License
244
+
245
+ MIT License - see [LICENSE](LICENSE)
246
+
247
+ ## Links
248
+
249
+ - [PyPI](https://pypi.org/project/olbrain-python-sdk/)
250
+ - [GitHub](https://github.com/Olbrain/olbrain-python-sdk)
251
+ - [Issues](https://github.com/Olbrain/olbrain-python-sdk/issues)
@@ -0,0 +1,149 @@
1
+ """
2
+ Basic usage examples for the Olbrain Python SDK.
3
+
4
+ This example demonstrates the fundamental features of the SDK including:
5
+ - Client initialization
6
+ - Session creation
7
+ - Sending messages (synchronous)
8
+ - Using model overrides
9
+ - Error handling
10
+ """
11
+
12
+ import os
13
+ from olbrain import AgentClient, ChatResponse
14
+ from olbrain.exceptions import (
15
+ OlbrainError,
16
+ AuthenticationError,
17
+ NetworkError
18
+ )
19
+
20
+
21
+ def main():
22
+ """Basic usage example."""
23
+
24
+ # Get credentials from environment variables
25
+ api_key = os.getenv("OLBRAIN_API_KEY", "sk_live_your-api-key-here")
26
+ agent_id = os.getenv("OLBRAIN_AGENT_ID", "your-agent-id-here")
27
+ # Optional: custom agent URL (auto-constructed from agent_id if not provided)
28
+ agent_url = os.getenv("OLBRAIN_AGENT_URL")
29
+
30
+ if api_key == "sk_live_your-api-key-here" or agent_id == "your-agent-id-here":
31
+ print("Please set OLBRAIN_API_KEY and OLBRAIN_AGENT_ID environment variables")
32
+ print()
33
+ print("Example:")
34
+ print(" export OLBRAIN_API_KEY=sk_live_your_key_here")
35
+ print(" export OLBRAIN_AGENT_ID=your-agent-id")
36
+ return
37
+
38
+ try:
39
+ # Initialize the client
40
+ client = AgentClient(
41
+ agent_id=agent_id,
42
+ api_key=api_key,
43
+ agent_url=agent_url # Optional - uses default Cloud Run URL if not provided
44
+ )
45
+ print(f"Client initialized for agent {agent_id}")
46
+ print(f"Agent URL: {client.agent_url}")
47
+
48
+ # -----------------------------------------------------------------
49
+ # Example 1: Creating a session and sending messages
50
+ # -----------------------------------------------------------------
51
+ print("\n--- Example 1: Create Session and Send Messages ---")
52
+
53
+ # Create a session (first message without session_id creates the session)
54
+ session_id = client.create_session(
55
+ title="Basic Demo Chat",
56
+ user_id="demo-user-123",
57
+ mode="production"
58
+ )
59
+ print(f"Session created: {session_id}")
60
+
61
+ # Send a message and get response
62
+ response_data = client.send(session_id, "Hello! Can you introduce yourself?")
63
+
64
+ # Parse response
65
+ response = ChatResponse.from_dict(response_data, session_id)
66
+ print(f"Agent: {response.text}")
67
+ if response.token_usage:
68
+ print(f"Tokens used: {response.token_usage.total_tokens}")
69
+ if response.model:
70
+ print(f"Model: {response.model}")
71
+ if response.cost:
72
+ print(f"Cost: ${response.cost:.6f}")
73
+
74
+ # Send another message in the same session
75
+ response_data2 = client.send(session_id, "What can you help me with?")
76
+ response2 = ChatResponse.from_dict(response_data2, session_id)
77
+ print(f"\nAgent: {response2.text}")
78
+
79
+ # -----------------------------------------------------------------
80
+ # Example 2: Using model override
81
+ # -----------------------------------------------------------------
82
+ print("\n--- Example 2: Model Override ---")
83
+
84
+ # Send message with specific model
85
+ response_data = client.send(
86
+ session_id,
87
+ "Explain quantum computing in one sentence",
88
+ model="gpt-4o" # Override default model
89
+ )
90
+ response = ChatResponse.from_dict(response_data, session_id)
91
+ print(f"Response: {response.text}")
92
+ print(f"Model used: {response.model}")
93
+
94
+ # -----------------------------------------------------------------
95
+ # Example 3: Using metadata
96
+ # -----------------------------------------------------------------
97
+ print("\n--- Example 3: Message with Metadata ---")
98
+
99
+ response_data = client.send(
100
+ session_id,
101
+ "Tell me a joke",
102
+ metadata={"category": "humor", "source": "example"}
103
+ )
104
+ response = ChatResponse.from_dict(response_data, session_id)
105
+ print(f"Agent: {response.text}")
106
+
107
+ # -----------------------------------------------------------------
108
+ # Example 4: Multiple sessions
109
+ # -----------------------------------------------------------------
110
+ print("\n--- Example 4: Multiple Independent Sessions ---")
111
+
112
+ # Create a second session
113
+ session2_id = client.create_session(
114
+ title="Second Chat",
115
+ user_id="another-user"
116
+ )
117
+ print(f"Second session created: {session2_id}")
118
+
119
+ # Send message to second session
120
+ response_data = client.send(session2_id, "Hi! What's your favorite color?")
121
+ response = ChatResponse.from_dict(response_data, session2_id)
122
+ print(f"Agent (Session 2): {response.text}")
123
+
124
+ # Sessions are independent
125
+ response_data = client.send(
126
+ session2_id,
127
+ "Do you remember what I asked in my first session?"
128
+ )
129
+ response = ChatResponse.from_dict(response_data, session2_id)
130
+ print(f"Agent (Session 2): {response.text}")
131
+
132
+ print("\nNote: Sessions are independent - each maintains its own conversation context")
133
+
134
+ except AuthenticationError:
135
+ print("Authentication failed. Please check your API key.")
136
+ except NetworkError as e:
137
+ print(f"Network error: {e}")
138
+ except OlbrainError as e:
139
+ print(f"Olbrain error: {e}")
140
+ except Exception as e:
141
+ print(f"Unexpected error: {e}")
142
+ finally:
143
+ if 'client' in locals():
144
+ client.close()
145
+ print("\nClient closed")
146
+
147
+
148
+ if __name__ == "__main__":
149
+ main()
@@ -1,4 +1,20 @@
1
1
  """
2
+ DEPRECATED: Most features in this example are no longer supported.
3
+
4
+ Session management endpoints (get, update, delete, stats, message history) are not supported
5
+ by the current API (v0.3.0+). The agent API uses a webhook-only architecture.
6
+
7
+ ONLY the following operations are supported:
8
+ - create_session(): Create a new session (first message without session_id)
9
+ - send(): Send messages to existing sessions
10
+
11
+ For current usage patterns, see basic_usage.py
12
+
13
+ This file is kept for reference only and will be removed in a future release.
14
+
15
+ ---
16
+
17
+ Original documentation (DEPRECATED):
2
18
  Session management example for the Olbrain Python SDK.
3
19
 
4
20
  This example demonstrates:
@@ -1,4 +1,18 @@
1
1
  """
2
+ DEPRECATED: This example is no longer valid.
3
+
4
+ SSE streaming is not supported by the current API (v0.3.0+).
5
+ The agent API uses a webhook-only architecture and does not provide SSE streaming endpoints.
6
+
7
+ For current usage patterns, see:
8
+ - basic_usage.py: Synchronous message sending with send()
9
+ - Use send_async() with a webhook URL if you need async processing
10
+
11
+ This file is kept for reference only and will be removed in a future release.
12
+
13
+ ---
14
+
15
+ Original documentation (DEPRECATED):
2
16
  Streaming responses example for the Olbrain Python SDK.
3
17
 
4
18
  This example demonstrates: