greennode-agent-bridge 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,34 @@
1
+ """GreenNode Agent Bridge - Connect agent frameworks with GreenNode AgentBase.
2
+
3
+ This package provides adaptors for various agent frameworks (LangGraph, CrewAI, etc.)
4
+ to work with GreenNode AgentBase Memory and Runtime services.
5
+ """
6
+
7
+ __version__ = "0.1.0"
8
+
9
+ # Conditional imports for optional dependencies
10
+ try:
11
+ from greennode_agent_bridge.langgraph.memory import (
12
+ AgentBaseMemoryEvents,
13
+ AgentBaseMemoryRecords,
14
+ )
15
+
16
+ __all__ = [
17
+ "AgentBaseMemoryEvents",
18
+ "AgentBaseMemoryRecords",
19
+ ]
20
+ except ImportError as e:
21
+ # Store the import error for later use
22
+ _import_error = e
23
+ __all__ = []
24
+
25
+ # Provide helpful error message when optional dependencies are missing
26
+ def _missing_langgraph_dependencies_error(*args, **kwargs):
27
+ raise ImportError(
28
+ "LangGraph functionality requires optional dependencies. "
29
+ "Install them with: pip install 'greennode-agent-bridge[langgraph]'"
30
+ ) from _import_error
31
+
32
+ # Create placeholder classes that raise helpful error
33
+ AgentBaseMemoryEvents = _missing_langgraph_dependencies_error # type: ignore[assignment]
34
+ AgentBaseMemoryRecords = _missing_langgraph_dependencies_error # type: ignore[assignment]
@@ -0,0 +1,21 @@
1
+ """LangGraph adaptor for GreenNode AgentBase.
2
+
3
+ This package provides adaptors for LangGraph to work with GreenNode AgentBase.
4
+ """
5
+
6
+ # Re-export from memory submodule for backward compatibility and convenience
7
+ try:
8
+ from greennode_agent_bridge.langgraph.memory import (
9
+ AgentBaseMemoryEvents,
10
+ AgentBaseMemoryRecords,
11
+ langgraph_available,
12
+ )
13
+
14
+ __all__ = [
15
+ "AgentBaseMemoryEvents",
16
+ "AgentBaseMemoryRecords",
17
+ "langgraph_available",
18
+ ]
19
+ except ImportError:
20
+ # If memory module is not available, provide empty exports
21
+ __all__ = []
@@ -0,0 +1,38 @@
1
+ """LangGraph Memory adaptor for GreenNode AgentBase.
2
+
3
+ This module provides checkpoint saver and store implementations for LangGraph
4
+ using GreenNode AgentBase Memory.
5
+ """
6
+
7
+ from typing import Any
8
+
9
+ # Store the import error for later use
10
+ _import_error: ImportError | None = None
11
+
12
+ # Conditional imports for optional dependencies
13
+ try:
14
+ from greennode_agent_bridge.langgraph.memory.events import AgentBaseMemoryEvents
15
+ from greennode_agent_bridge.langgraph.memory.records import AgentBaseMemoryRecords
16
+
17
+ langgraph_available = True
18
+ except ImportError as e:
19
+ # Store the error for later use
20
+ _import_error = e
21
+ langgraph_available = False
22
+
23
+ # If dependencies are not available, provide helpful error message
24
+ def _missing_langgraph_dependencies_error(*args: Any, **kwargs: Any) -> Any:
25
+ raise ImportError(
26
+ "LangGraph functionality requires optional dependencies. "
27
+ "Install them with: pip install 'greennode-agent-bridge[langgraph]'"
28
+ ) from _import_error
29
+
30
+ # Create placeholder classes that raise helpful error
31
+ AgentBaseMemoryEvents: type[Any] = _missing_langgraph_dependencies_error # type: ignore[assignment,no-redef]
32
+ AgentBaseMemoryRecords: type[Any] = _missing_langgraph_dependencies_error # type: ignore[assignment,no-redef]
33
+
34
+ __all__ = [
35
+ "AgentBaseMemoryEvents",
36
+ "AgentBaseMemoryRecords",
37
+ "langgraph_available",
38
+ ]
@@ -0,0 +1,27 @@
1
"""Constants and exceptions for AgentBase Memory Checkpoint Saver."""

# Sentinel stored as a channel event's value when the channel has a new
# version but no value present in the checkpoint.
EMPTY_CHANNEL_VALUE = "_empty"


class AgentBaseMemoryError(Exception):
    """Base exception for AgentBase Memory errors."""


class EventDecodingError(AgentBaseMemoryError):
    """Raised when event decoding fails."""


class InvalidConfigError(AgentBaseMemoryError):
    """Raised when configuration is invalid."""


class EventNotFoundError(AgentBaseMemoryError):
    """Raised when expected event is not found."""
@@ -0,0 +1,349 @@
1
+ """AgentBase Memory Checkpoint Saver implementation."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import random
7
+ from collections.abc import AsyncIterator, Iterator, Sequence
8
+ from typing import Any, TypeAlias, cast
9
+
10
+ from langchain_core.runnables import RunnableConfig, run_in_executor
11
+ from langgraph.checkpoint.base import (
12
+ BaseCheckpointSaver,
13
+ ChannelVersions,
14
+ Checkpoint,
15
+ CheckpointMetadata,
16
+ CheckpointTuple,
17
+ SerializerProtocol,
18
+ get_checkpoint_id,
19
+ get_checkpoint_metadata,
20
+ )
21
+
22
+ from greennode_agent_bridge.langgraph.memory.constants import (
23
+ EMPTY_CHANNEL_VALUE,
24
+ InvalidConfigError,
25
+ )
26
+ from greennode_agent_bridge.langgraph.memory.helpers import (
27
+ DEFAULT_INITIAL_BACKOFF,
28
+ DEFAULT_MAX_BACKOFF,
29
+ DEFAULT_MAX_RETRIES,
30
+ AgentBaseEventClient,
31
+ EventProcessor,
32
+ EventSerializer,
33
+ )
34
+ from greennode_agent_bridge.langgraph.memory.models import (
35
+ ChannelDataEvent,
36
+ CheckpointerConfig,
37
+ CheckpointEvent,
38
+ WriteItem,
39
+ WritesEvent,
40
+ )
41
+
42
RunnableConfigDict: TypeAlias = dict[str, Any]


class AgentBaseMemoryEvents(BaseCheckpointSaver[str]):
    """
    AgentBase Memory checkpoint saver.

    This saver persists Checkpoints as serialized blob events in AgentBase Memory.

    Args:
        memory_id: the ID of the memory resource created in AgentBase Memory
        serde: serialization protocol to be used. Defaults to JSONPlusSerializer
        limit: maximum number of events to parse from ListEvents.
        max_results: maximum number of results to retrieve from AgentBase Memory.
        max_retries: maximum number of retry attempts for retryable errors.
        initial_backoff: initial backoff time in seconds for exponential backoff.
        max_backoff: maximum backoff time in seconds.
        memory_client: Optional MemoryClient instance. If not provided, creates a new one.
    """

    def __init__(
        self,
        memory_id: str,
        *,
        serde: SerializerProtocol | None = None,
        limit: int | None = None,
        max_results: int | None = 100,
        max_retries: int = DEFAULT_MAX_RETRIES,
        initial_backoff: float = DEFAULT_INITIAL_BACKOFF,
        max_backoff: float = DEFAULT_MAX_BACKOFF,
        memory_client: Any | None = None,
    ) -> None:
        super().__init__(serde=serde)

        self.memory_id = memory_id
        self.limit = limit
        self.max_results = max_results
        self.serializer = EventSerializer(self.serde)
        # All network access goes through this client; it owns retry/backoff.
        self.checkpoint_event_client = AgentBaseEventClient(
            memory_id,
            self.serializer,
            memory_client=memory_client,
            max_retries=max_retries,
            initial_backoff=initial_backoff,
            max_backoff=max_backoff,
        )
        self.processor = EventProcessor()

    @staticmethod
    def _matches_metadata_filter(
        checkpoint_event: CheckpointEvent, metadata_filter: dict[str, Any]
    ) -> bool:
        """Return True when every key/value in *metadata_filter* matches the checkpoint metadata."""
        metadata = checkpoint_event.metadata or {}
        return all(metadata.get(key) == value for key, value in metadata_filter.items())

    def get_tuple(
        self,
        config: RunnableConfig,
    ) -> CheckpointTuple | None:
        """Get a checkpoint tuple from AgentBase Memory.

        Args:
            config: The runnable config containing checkpoint information

        Returns:
            CheckpointTuple if found, None otherwise
        """

        # TODO: There is room for caching here on the client side

        checkpoint_config = CheckpointerConfig.from_runnable_config(
            RunnableConfigDict(config)
        )

        events = self.checkpoint_event_client.get_events(
            checkpoint_config.session_id,
            checkpoint_config.actor_id,
            self.limit,
            self.max_results,
        )

        checkpoints, writes_by_checkpoint, channel_data = self.processor.process_events(
            events
        )

        if not checkpoints:
            return None

        # Find the specific checkpoint if `checkpoint_id` is provided or return the latest one
        if checkpoint_config.checkpoint_id:
            checkpoint_event = checkpoints.get(checkpoint_config.checkpoint_id)
            if not checkpoint_event:
                return None
        else:
            # Checkpoint IDs sort lexicographically in creation order, so max() is newest.
            latest_checkpoint_id = max(checkpoints.keys())
            checkpoint_event = checkpoints[latest_checkpoint_id]

        # Build and return checkpoint tuple
        writes = writes_by_checkpoint.get(checkpoint_event.checkpoint_id, [])
        return self.processor.build_checkpoint_tuple(
            checkpoint_event, writes, channel_data, checkpoint_config
        )

    def list(
        self,
        config: RunnableConfig | None,
        *,
        filter: dict[str, Any] | None = None,
        before: RunnableConfig | None = None,
        limit: int | None = None,
    ) -> Iterator[CheckpointTuple]:
        """List checkpoints from AgentBase Memory, most recent first.

        Args:
            config: Optional config scoping the listing to a session/actor; if it
                carries a checkpoint_id only that checkpoint is yielded.
            filter: Metadata key/value pairs a checkpoint must contain to be yielded.
            before: Only yield checkpoints whose ID sorts before this config's checkpoint_id.
            limit: Maximum number of checkpoint tuples to yield.
        """

        # TODO: There is room for caching here on the client side

        checkpoint_config = CheckpointerConfig.from_runnable_config(
            RunnableConfigDict(config) if config else {}
        )
        config_checkpoint_id = get_checkpoint_id(config) if config else None

        events = self.checkpoint_event_client.get_events(
            checkpoint_config.session_id,
            checkpoint_config.actor_id,
            limit,
            self.max_results,
        )

        checkpoints, writes_by_checkpoint, channel_data = self.processor.process_events(
            events
        )

        # Build and yield CheckpointTuples
        count = 0
        before_checkpoint_id = get_checkpoint_id(before) if before else None

        # Sort checkpoints by ID in descending order (most recent first)
        for checkpoint_id in sorted(checkpoints.keys(), reverse=True):
            checkpoint_event = checkpoints[checkpoint_id]

            # Apply filters
            if config_checkpoint_id and checkpoint_id != config_checkpoint_id:
                continue

            if before_checkpoint_id and checkpoint_id >= before_checkpoint_id:
                continue

            # BUGFIX: `filter` was previously accepted but silently ignored;
            # apply it against checkpoint metadata as the base API documents.
            if filter and not self._matches_metadata_filter(checkpoint_event, filter):
                continue

            if limit is not None and count >= limit:
                break

            writes = writes_by_checkpoint.get(checkpoint_id, [])

            yield self.processor.build_checkpoint_tuple(
                checkpoint_event, writes, channel_data, checkpoint_config
            )

            count += 1

    def put(
        self,
        config: RunnableConfig,
        checkpoint: Checkpoint,
        metadata: CheckpointMetadata,
        new_versions: ChannelVersions,
    ) -> RunnableConfig:
        """Save a checkpoint to AgentBase Memory.

        Channel values are stored as separate per-channel events; the checkpoint
        body (minus channel_values) is stored as its own event. All events for
        one checkpoint are written in a single batch.

        Returns:
            The config updated with the new checkpoint_id.
        """
        checkpoint_config = CheckpointerConfig.from_runnable_config(
            RunnableConfigDict(config)
        )

        # Extract channel values so they are not duplicated inside the checkpoint event.
        checkpoint_copy = dict(checkpoint)
        channel_values: dict[str, Any] = {}
        if "channel_values" in checkpoint_copy:
            channel_values_obj = checkpoint_copy.pop("channel_values")
            if isinstance(channel_values_obj, dict):
                channel_values = channel_values_obj.copy()

        # Create all events to be stored in a single batch
        events_to_store: list[CheckpointEvent | ChannelDataEvent | WritesEvent] = []

        # Create channel data events; a channel with a version but no value
        # gets the EMPTY_CHANNEL_VALUE sentinel.
        for channel, version in new_versions.items():
            channel_event = ChannelDataEvent(
                channel=channel,
                version=str(version),
                value=channel_values.get(channel, EMPTY_CHANNEL_VALUE),
                thread_id=checkpoint_config.thread_id,
                checkpoint_ns=checkpoint_config.checkpoint_ns,
            )
            events_to_store.append(channel_event)

        checkpoint_event = CheckpointEvent(
            checkpoint_id=checkpoint["id"],
            checkpoint_data=checkpoint_copy,
            metadata=dict(get_checkpoint_metadata(config, metadata)),
            parent_checkpoint_id=checkpoint_config.checkpoint_id,
            thread_id=checkpoint_config.thread_id,
            checkpoint_ns=checkpoint_config.checkpoint_ns,
        )
        events_to_store.append(checkpoint_event)

        # NOTE: the previous `typing.cast` here was redundant; events_to_store
        # is already declared with the exact element union type.
        self.checkpoint_event_client.store_blob_events_batch(
            events_to_store, checkpoint_config.session_id, checkpoint_config.actor_id
        )

        return {
            "configurable": {
                "thread_id": checkpoint_config.thread_id,
                "actor_id": checkpoint_config.actor_id,
                "checkpoint_ns": checkpoint_config.checkpoint_ns,
                "checkpoint_id": checkpoint["id"],
            }
        }

    def put_writes(
        self,
        config: RunnableConfig,
        writes: Sequence[tuple[str, Any]],
        task_id: str,
        task_path: str = "",
    ) -> None:
        """Save pending writes to AgentBase Memory.

        Raises:
            InvalidConfigError: if the config does not carry a checkpoint_id.
        """
        checkpoint_config = CheckpointerConfig.from_runnable_config(
            RunnableConfigDict(config)
        )

        if not checkpoint_config.checkpoint_id:
            raise InvalidConfigError("checkpoint_id is required for put_writes")

        # Create write items
        write_items = [
            WriteItem(
                task_id=task_id,
                channel=channel,
                value=value,
                task_path=task_path,
            )
            for channel, value in writes
        ]

        writes_event = WritesEvent(
            checkpoint_id=checkpoint_config.checkpoint_id,
            writes=write_items,
        )

        self.checkpoint_event_client.store_blob_event(
            writes_event, checkpoint_config.session_id, checkpoint_config.actor_id
        )

    def delete_thread(self, thread_id: str, actor_id: str = "") -> None:
        """Delete all checkpoints and writes associated with a thread."""
        self.checkpoint_event_client.delete_events(thread_id, actor_id)

    # ===== Async methods (running the sync methods inside an executor) =====
    async def aget_tuple(self, config: RunnableConfig) -> CheckpointTuple | None:
        """Async wrapper around :meth:`get_tuple`."""
        return await run_in_executor(None, self.get_tuple, config)

    async def alist(
        self,
        config: RunnableConfig | None,
        *,
        filter: dict[str, Any] | None = None,
        before: RunnableConfig | None = None,
        limit: int | None = None,
    ) -> AsyncIterator[CheckpointTuple]:
        """Async wrapper around :meth:`list`.

        The sync iterator is fully materialized inside the executor before
        yielding, so results are not streamed incrementally.
        """
        loop = asyncio.get_running_loop()

        def _sync_list():
            return list(self.list(config, filter=filter, before=before, limit=limit))

        items = await loop.run_in_executor(None, _sync_list)
        for item in items:
            yield item

    async def aput(
        self,
        config: RunnableConfig,
        checkpoint: Checkpoint,
        metadata: CheckpointMetadata,
        new_versions: ChannelVersions,
    ) -> RunnableConfig:
        """Async wrapper around :meth:`put`."""
        return await run_in_executor(
            None, self.put, config, checkpoint, metadata, new_versions
        )

    async def aput_writes(
        self,
        config: RunnableConfig,
        writes: Sequence[tuple[str, Any]],
        task_id: str,
        task_path: str = "",
    ) -> None:
        """Async wrapper around :meth:`put_writes`."""
        return await run_in_executor(
            None, self.put_writes, config, writes, task_id, task_path
        )

    async def adelete_thread(self, thread_id: str, actor_id: str = "") -> None:
        """Async wrapper around :meth:`delete_thread`."""
        await run_in_executor(None, self.delete_thread, thread_id, actor_id)

    def get_next_version(
        self, current: str | int | None, channel: str | None = None
    ) -> str:
        """Generate the next version string.

        The integer part increases monotonically (zero-padded to 32 digits so
        versions sort lexicographically); the fractional suffix is a random
        tiebreaker.
        """
        if current is None:
            current_v = 0
        elif isinstance(current, int):
            current_v = current
        else:
            current_v = int(current.split(".")[0])

        next_v = current_v + 1
        next_h = random.random()
        return f"{next_v:032}.{next_h:016}"