langgraph-api 0.0.48__py3-none-any.whl → 0.1.0__py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release.
This version of langgraph-api might be problematic.
- langgraph_api/__init__.py +1 -1
- langgraph_api/api/__init__.py +2 -2
- langgraph_api/api/assistants.py +3 -3
- langgraph_api/api/meta.py +9 -11
- langgraph_api/api/runs.py +3 -3
- langgraph_api/api/store.py +2 -2
- langgraph_api/api/threads.py +3 -3
- langgraph_api/cli.py +3 -1
- langgraph_api/config.py +3 -0
- langgraph_api/cron_scheduler.py +3 -3
- langgraph_api/graph.py +2 -2
- langgraph_api/js/remote.py +3 -3
- langgraph_api/metadata.py +7 -0
- langgraph_api/models/run.py +10 -1
- langgraph_api/queue_entrypoint.py +1 -1
- langgraph_api/server.py +2 -2
- langgraph_api/stream.py +3 -3
- langgraph_api/thread_ttl.py +2 -2
- langgraph_api/worker.py +3 -3
- {langgraph_api-0.0.48.dist-info → langgraph_api-0.1.0.dist-info}/METADATA +1 -1
- {langgraph_api-0.0.48.dist-info → langgraph_api-0.1.0.dist-info}/RECORD +25 -33
- langgraph_runtime/__init__.py +39 -0
- langgraph_api/lifespan.py +0 -74
- langgraph_storage/__init__.py +0 -0
- langgraph_storage/checkpoint.py +0 -123
- langgraph_storage/database.py +0 -200
- langgraph_storage/inmem_stream.py +0 -109
- langgraph_storage/ops.py +0 -2172
- langgraph_storage/queue.py +0 -186
- langgraph_storage/retry.py +0 -31
- langgraph_storage/store.py +0 -100
- {langgraph_api-0.0.48.dist-info → langgraph_api-0.1.0.dist-info}/LICENSE +0 -0
- {langgraph_api-0.0.48.dist-info → langgraph_api-0.1.0.dist-info}/WHEEL +0 -0
- {langgraph_api-0.0.48.dist-info → langgraph_api-0.1.0.dist-info}/entry_points.txt +0 -0
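The headline change in 0.1.0 is structural: the bundled in-process `langgraph_storage` backend (checkpoint, database, inmem_stream, ops, queue, retry, store) is deleted along with `langgraph_api/lifespan.py`, and a new `langgraph_runtime` package appears in its place. As a quick illustration, downstream code that reached into the old internals could probe for the new layout before importing; the helper below is hypothetical and not part of the package.

```python
# Hypothetical probe for the langgraph-api backend layout; not part of the package.
import importlib.util


def backend_package() -> str:
    """Return the name of whichever internal backend package is importable."""
    if importlib.util.find_spec("langgraph_runtime") is not None:
        return "langgraph_runtime"  # layout introduced in 0.1.0
    if importlib.util.find_spec("langgraph_storage") is not None:
        return "langgraph_storage"  # layout up to 0.0.48
    raise ImportError("no langgraph-api backend package found")


print(backend_package())
```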
langgraph_storage/checkpoint.py
DELETED
@@ -1,123 +0,0 @@
-import logging
-import os
-import uuid
-
-from langchain_core.runnables import RunnableConfig
-from langgraph.checkpoint.base import (
-    Checkpoint,
-    CheckpointMetadata,
-    CheckpointTuple,
-    SerializerProtocol,
-)
-from langgraph.checkpoint.memory import MemorySaver, PersistentDict
-
-from langgraph_api.serde import Serializer
-
-logger = logging.getLogger(__name__)
-
-_EXCLUDED_KEYS = {"checkpoint_ns", "checkpoint_id", "run_id", "thread_id"}
-DISABLE_FILE_PERSISTENCE = (
-    os.getenv("LANGGRAPH_DISABLE_FILE_PERSISTENCE", "false").lower() == "true"
-)
-
-
-class InMemorySaver(MemorySaver):
-    def __init__(
-        self,
-        *,
-        serde: SerializerProtocol | None = None,
-    ) -> None:
-        self.filename = os.path.join(".langgraph_api", ".langgraph_checkpoint.")
-        i = 0
-
-        def factory(*args):
-            nonlocal i
-            i += 1
-
-            thisfname = self.filename + str(i) + ".pckl"
-            d = PersistentDict(*args, filename=thisfname)
-            if not os.path.exists(".langgraph_api"):
-                os.mkdir(".langgraph_api")
-            try:
-                d.load()
-            except FileNotFoundError:
-                pass
-            except ModuleNotFoundError:
-                logger.error(
-                    "Unable to load cached data - your code has changed in a way that's incompatible with the cache."
-                    "\nThis usually happens when you've:"
-                    "\n - Renamed or moved classes"
-                    "\n - Changed class structures"
-                    "\n - Pulled updates that modified class definitions in a way that's incompatible with the cache"
-                    "\n\nRemoving invalid cache data stored at path: .langgraph_api"
-                )
-                os.remove(self.filename)
-            except Exception as e:
-                logger.error("Failed to load cached data: %s", str(e))
-                os.remove(self.filename)
-            return d
-
-        super().__init__(
-            serde=serde if serde is not None else Serializer(),
-            factory=factory if not DISABLE_FILE_PERSISTENCE else None,
-        )
-
-    def put(
-        self,
-        config: RunnableConfig,
-        checkpoint: Checkpoint,
-        metadata: CheckpointMetadata,
-        new_versions: dict[str, str | int | float],
-    ) -> RunnableConfig:
-        # TODO: Should this be done in OSS as well?
-        metadata = {
-            **{
-                k: v
-                for k, v in config["configurable"].items()
-                if not k.startswith("__") and k not in _EXCLUDED_KEYS
-            },
-            **config.get("metadata", {}),
-            **metadata,
-        }
-        if not isinstance(checkpoint["id"], uuid.UUID):
-            # Avoid type inconsistencies
-            checkpoint = checkpoint.copy()
-            checkpoint["id"] = str(checkpoint["id"])
-        return super().put(config, checkpoint, metadata, new_versions)
-
-    def get_tuple(self, config: RunnableConfig) -> CheckpointTuple | None:
-        if isinstance(config["configurable"].get("checkpoint_id"), uuid.UUID):
-            # Avoid type inconsistencies....
-            config = config.copy()
-
-            config["configurable"] = {
-                **config["configurable"],
-                "checkpoint_id": str(config["configurable"]["checkpoint_id"]),
-            }
-        return super().get_tuple(config)
-
-    def clear(self):
-        self.storage.clear()
-        self.writes.clear()
-        for suffix in ["1", "2"]:
-            file_path = f"{self.filename}{suffix}.pckl"
-            if os.path.exists(file_path):
-                os.remove(file_path)
-
-
-MEMORY = InMemorySaver()
-
-
-def Checkpointer(*args, unpack_hook=None, **kwargs):
-    if unpack_hook is not None:
-        saver = InMemorySaver(
-            serde=Serializer(__unpack_ext_hook__=unpack_hook), **kwargs
-        )
-        saver.writes = MEMORY.writes
-        saver.blobs = MEMORY.blobs
-        saver.storage = MEMORY.storage
-        return saver
-    return MEMORY
-
-
-__all__ = ["Checkpointer"]
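For reference, a minimal sketch of how this deleted factory behaved: `Checkpointer()` handed back the module-level `MEMORY` singleton, while passing `unpack_hook` built a fresh `InMemorySaver` with its own serializer that aliased the singleton's storage. This assumes langgraph-api 0.0.48 is installed; `my_unpack_hook` is a placeholder, and its `(code, data)` signature is an assumption based on the msgpack-style `__unpack_ext_hook__` keyword.

```python
# Hypothetical usage of the deleted 0.0.48 module; not shipped with the package.
from langgraph_storage.checkpoint import MEMORY, Checkpointer

saver = Checkpointer()
assert saver is MEMORY  # no hook: the shared singleton is returned


def my_unpack_hook(code, data):  # placeholder; signature assumed msgpack-style
    return data


custom = Checkpointer(unpack_hook=my_unpack_hook)
assert custom is not MEMORY              # fresh saver with its own serde...
assert custom.storage is MEMORY.storage  # ...but aliasing the singleton's state
```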
langgraph_storage/database.py
DELETED
@@ -1,200 +0,0 @@
-import asyncio
-import os
-import uuid
-from collections import defaultdict
-from collections.abc import AsyncIterator
-from contextlib import asynccontextmanager
-from datetime import datetime
-from typing import Any, NotRequired, TypedDict
-from uuid import UUID
-
-import structlog
-from langgraph.checkpoint.memory import PersistentDict
-
-from langgraph_api import config as langgraph_config
-from langgraph_api.utils import AsyncConnectionProto
-from langgraph_storage import store
-from langgraph_storage.inmem_stream import start_stream, stop_stream
-
-logger = structlog.stdlib.get_logger(__name__)
-
-
-class Assistant(TypedDict):
-    assistant_id: UUID
-    graph_id: str
-    name: str
-    description: str | None
-    created_at: NotRequired[datetime]
-    updated_at: NotRequired[datetime]
-    config: dict[str, Any]
-    metadata: dict[str, Any]
-
-
-class Thread(TypedDict):
-    thread_id: UUID
-    created_at: NotRequired[datetime]
-    updated_at: NotRequired[datetime]
-    metadata: dict[str, Any]
-    status: str
-
-
-class Run(TypedDict):
-    run_id: UUID
-    thread_id: UUID
-    assistant_id: UUID
-    created_at: NotRequired[datetime]
-    updated_at: NotRequired[datetime]
-    metadata: dict[str, Any]
-    status: str
-
-
-class RunEvent(TypedDict):
-    event_id: UUID
-    run_id: UUID
-    received_at: NotRequired[datetime]
-    span_id: UUID
-    event: str
-    name: str
-    tags: list[Any]
-    data: dict[str, Any]
-    metadata: dict[str, Any]
-
-
-class AssistantVersion(TypedDict):
-    assistant_id: UUID
-    version: int
-    graph_id: str
-    config: dict[str, Any]
-    metadata: dict[str, Any]
-    created_at: NotRequired[datetime]
-    name: str
-
-
-class GlobalStore(PersistentDict):
-    def __init__(self, *args: Any, filename: str, **kwargs: Any) -> None:
-        super().__init__(*args, filename=filename, **kwargs)
-        self.clear()
-
-    def clear(self):
-        assistants = self.get("assistants", [])
-        super().clear()
-        self["runs"] = []
-        self["threads"] = []
-        self["assistants"] = [
-            a for a in assistants if a["metadata"].get("created_by") == "system"
-        ]
-        self["assistant_versions"] = []
-
-
-OPS_FILENAME = os.path.join(".langgraph_api", ".langgraph_ops.pckl")
-RETRY_COUNTER_FILENAME = os.path.join(".langgraph_api", ".langgraph_retry_counter.pckl")
-
-
-class InMemoryRetryCounter:
-    def __init__(self):
-        self._counters: dict[uuid.UUID, int] = PersistentDict(
-            int, filename=RETRY_COUNTER_FILENAME
-        )
-        self._locks: dict[uuid.UUID, asyncio.Lock] = defaultdict(asyncio.Lock)
-
-    async def increment(self, run_id: uuid.UUID) -> int:
-        async with self._locks[run_id]:
-            self._counters[run_id] += 1
-            return self._counters[run_id]
-
-    def close(self):
-        self._counters.close()
-
-
-# Global retry counter for in-memory implementation
-GLOBAL_RETRY_COUNTER = InMemoryRetryCounter()
-GLOBAL_STORE = GlobalStore(filename=OPS_FILENAME)
-
-
-class InMemConnectionProto:
-    def __init__(self):
-        self.filename = OPS_FILENAME
-        self.store = GLOBAL_STORE
-        self.retry_counter = GLOBAL_RETRY_COUNTER
-        self.can_execute = False
-
-    @asynccontextmanager
-    async def pipeline(self):
-        yield None
-
-    async def execute(self, query: str, *args, **kwargs):
-        return None
-
-    def clear(self):
-        self.store.clear()
-        keys = list(self.retry_counter._counters)
-        for key in keys:
-            del self.retry_counter._counters[key]
-        keys = list(self.retry_counter._locks)
-        for key in keys:
-            del self.retry_counter._locks[key]
-        if os.path.exists(self.filename):
-            os.remove(self.filename)
-
-
-@asynccontextmanager
-async def connect(*, __test__: bool = False) -> AsyncIterator[AsyncConnectionProto]:
-    yield InMemConnectionProto()
-
-
-async def start_pool() -> None:
-    if store._STORE_CONFIG is None:
-        if langgraph_config.STORE_CONFIG:
-            config_ = langgraph_config.STORE_CONFIG
-            store.set_store_config(config_)
-
-    if not os.path.exists(".langgraph_api"):
-        os.mkdir(".langgraph_api")
-    if os.path.exists(OPS_FILENAME):
-        try:
-            GLOBAL_STORE.load()
-        except ModuleNotFoundError:
-            logger.error(
-                "Unable to load cached data - your code has changed in a way that's incompatible with the cache."
-                "\nThis usually happens when you've:"
-                "\n - Renamed or moved classes"
-                "\n - Changed class structures"
-                "\n - Pulled updates that modified class definitions in a way that's incompatible with the cache"
-                "\n\nRemoving invalid cache data stored at path: .langgraph_api"
-            )
-            await asyncio.to_thread(os.remove, OPS_FILENAME)
-            await asyncio.to_thread(os.remove, RETRY_COUNTER_FILENAME)
-        except Exception as e:
-            logger.error("Failed to load cached data: %s", str(e))
-            await asyncio.to_thread(os.remove, OPS_FILENAME)
-            await asyncio.to_thread(os.remove, RETRY_COUNTER_FILENAME)
-    for k in ["runs", "threads", "assistants", "assistant_versions"]:
-        if not GLOBAL_STORE.get(k):
-            GLOBAL_STORE[k] = []
-    for k in ["crons"]:
-        if not GLOBAL_STORE.get(k):
-            GLOBAL_STORE[k] = {}
-    await start_stream()
-
-
-async def stop_pool() -> None:
-    await asyncio.to_thread(GLOBAL_STORE.close)
-    await asyncio.to_thread(GLOBAL_RETRY_COUNTER.close)
-    from langgraph_storage.checkpoint import Checkpointer
-    from langgraph_storage.store import STORE
-
-    await asyncio.to_thread(STORE.close)
-
-    async with Checkpointer():
-        pass
-    await stop_stream()
-
-
-async def healthcheck() -> None:
-    # What could possibly go wrong?
-    pass
-
-
-def pool_stats() -> dict[str, dict[str, int]]:
-    # TODO??
-    return {}
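Taken together, the deleted module implemented the server's database interface as an in-memory stand-in: `connect()` yields a connection whose `execute()` is a no-op, with all state living in two `PersistentDict`s flushed to `.langgraph_api/`. A hypothetical driver for that lifecycle, assuming langgraph-api 0.0.48 is installed:

```python
# Hypothetical driver for the deleted in-memory database module (0.0.48).
import asyncio

from langgraph_storage.database import connect, healthcheck, start_pool, stop_pool


async def main() -> None:
    await start_pool()  # loads .langgraph_api/.langgraph_ops.pckl if present
    try:
        await healthcheck()  # a no-op for the in-memory backend
        async with connect() as conn:
            # All state lives in the GlobalStore dict behind the connection.
            print(len(conn.store["runs"]), "runs recorded")
    finally:
        await stop_pool()  # flushes the PersistentDicts back to disk


asyncio.run(main())
```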
langgraph_storage/inmem_stream.py
DELETED
@@ -1,109 +0,0 @@
-import asyncio
-import logging
-from collections import defaultdict
-from dataclasses import dataclass
-from uuid import UUID
-
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class Message:
-    topic: bytes
-    data: bytes
-
-
-class ContextQueue(asyncio.Queue):
-    """Queue that supports async context manager protocol"""
-
-    async def __aenter__(self):
-        return self
-
-    async def __aexit__(
-        self,
-        exc_type: type[BaseException] | None,
-        exc_val: BaseException | None,
-        exc_tb: object | None,
-    ) -> None:
-        # Clear the queue
-        while not self.empty():
-            try:
-                self.get_nowait()
-            except asyncio.QueueEmpty:
-                break
-
-
-class StreamManager:
-    def __init__(self):
-        self.queues = defaultdict(list)  # Dict[UUID, List[asyncio.Queue]]
-        self.control_queues = defaultdict(list)
-
-    def get_queues(self, run_id: UUID) -> list[asyncio.Queue]:
-        return self.queues[run_id]
-
-    async def put(self, run_id: UUID, message: Message) -> None:
-        topic = message.topic.decode()
-        if "control" in topic:
-            self.control_queues[run_id].append(message)
-        queues = self.queues.get(run_id, [])
-        coros = [queue.put(message) for queue in queues]
-        results = await asyncio.gather(*coros, return_exceptions=True)
-        for result in results:
-            if isinstance(result, Exception):
-                logger.exception(f"Failed to put message in queue: {result}")
-
-    async def add_queue(self, run_id: UUID) -> asyncio.Queue:
-        queue = ContextQueue()
-        self.queues[run_id].append(queue)
-        for control_msg in self.control_queues[run_id]:
-            try:
-                await queue.put(control_msg)
-            except Exception:
-                logger.exception(
-                    f"Failed to put control message in queue: {control_msg}"
-                )
-
-        return queue
-
-    async def remove_queue(self, run_id: UUID, queue: asyncio.Queue):
-        if run_id in self.queues:
-            self.queues[run_id].remove(queue)
-            if not self.queues[run_id]:
-                del self.queues[run_id]
-
-
-# Global instance
-stream_manager = StreamManager()
-
-
-async def start_stream() -> None:
-    """Initialize the queue system.
-    In this in-memory implementation, we just need to ensure we have a clean StreamManager instance.
-    """
-    global stream_manager
-    stream_manager = StreamManager()
-
-
-async def stop_stream() -> None:
-    """Clean up the queue system.
-    Clear all queues and stored control messages."""
-    global stream_manager
-
-    # Send 'done' message to all active queues before clearing
-    for run_id in list(stream_manager.queues.keys()):
-        control_message = Message(topic=f"run:{run_id}:control".encode(), data=b"done")
-
-        for queue in stream_manager.queues[run_id]:
-            try:
-                await queue.put(control_message)
-            except (Exception, RuntimeError):
-                pass  # Ignore errors during shutdown
-
-    # Clear all stored data
-    stream_manager.queues.clear()
-    stream_manager.control_queues.clear()
-
-
-def get_stream_manager() -> StreamManager:
-    """Get the global stream manager instance."""
-    return stream_manager
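The deleted stream module was a per-run fan-out: `put()` broadcasts a `Message` to every queue registered for a `run_id`, and any message whose topic contains "control" is also retained so late subscribers receive it on `add_queue()`. A small illustrative driver follows; the `run_id` and payload are invented, and it assumes langgraph-api 0.0.48 is installed.

```python
# Hypothetical driver for the deleted in-memory stream module (0.0.48).
import asyncio
from uuid import uuid4

from langgraph_storage.inmem_stream import Message, get_stream_manager


async def main() -> None:
    manager = get_stream_manager()
    run_id = uuid4()  # invented for illustration

    queue = await manager.add_queue(run_id)  # replays any retained control messages
    try:
        # Broadcast to every subscriber of this run.
        await manager.put(run_id, Message(topic=b"run:events", data=b'{"ok": true}'))
        msg = await queue.get()
        print(msg.topic, msg.data)
    finally:
        await manager.remove_queue(run_id, queue)


asyncio.run(main())
```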