langgraph-api 0.0.48__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions as released to a supported public registry. It is provided for informational purposes only.
Potentially problematic release.
This version of langgraph-api might be problematic.
- langgraph_api/__init__.py +1 -1
- langgraph_api/api/__init__.py +2 -2
- langgraph_api/api/assistants.py +3 -3
- langgraph_api/api/meta.py +9 -11
- langgraph_api/api/runs.py +3 -3
- langgraph_api/api/store.py +2 -2
- langgraph_api/api/threads.py +3 -3
- langgraph_api/auth/custom.py +25 -4
- langgraph_api/cli.py +3 -1
- langgraph_api/config.py +3 -0
- langgraph_api/cron_scheduler.py +3 -3
- langgraph_api/graph.py +6 -14
- langgraph_api/js/base.py +17 -0
- langgraph_api/js/build.mts +3 -3
- langgraph_api/js/client.mts +64 -3
- langgraph_api/js/global.d.ts +1 -0
- langgraph_api/js/package.json +4 -3
- langgraph_api/js/remote.py +96 -5
- langgraph_api/js/src/graph.mts +0 -6
- langgraph_api/js/src/utils/files.mts +4 -0
- langgraph_api/js/tests/api.test.mts +80 -80
- langgraph_api/js/tests/auth.test.mts +648 -0
- langgraph_api/js/tests/compose-postgres.auth.yml +59 -0
- langgraph_api/js/tests/graphs/agent_simple.mts +79 -0
- langgraph_api/js/tests/graphs/auth.mts +106 -0
- langgraph_api/js/tests/graphs/package.json +3 -1
- langgraph_api/js/tests/graphs/yarn.lock +9 -4
- langgraph_api/js/yarn.lock +18 -23
- langgraph_api/metadata.py +7 -0
- langgraph_api/models/run.py +10 -1
- langgraph_api/queue_entrypoint.py +1 -1
- langgraph_api/server.py +2 -2
- langgraph_api/stream.py +5 -4
- langgraph_api/thread_ttl.py +2 -2
- langgraph_api/worker.py +4 -25
- {langgraph_api-0.0.48.dist-info → langgraph_api-0.1.2.dist-info}/METADATA +1 -2
- {langgraph_api-0.0.48.dist-info → langgraph_api-0.1.2.dist-info}/RECORD +42 -44
- langgraph_runtime/__init__.py +39 -0
- langgraph_api/lifespan.py +0 -74
- langgraph_storage/checkpoint.py +0 -123
- langgraph_storage/database.py +0 -200
- langgraph_storage/inmem_stream.py +0 -109
- langgraph_storage/ops.py +0 -2172
- langgraph_storage/queue.py +0 -186
- langgraph_storage/retry.py +0 -31
- langgraph_storage/store.py +0 -100
- {langgraph_storage → langgraph_api/js}/__init__.py +0 -0
- {langgraph_api-0.0.48.dist-info → langgraph_api-0.1.2.dist-info}/LICENSE +0 -0
- {langgraph_api-0.0.48.dist-info → langgraph_api-0.1.2.dist-info}/WHEEL +0 -0
- {langgraph_api-0.0.48.dist-info → langgraph_api-0.1.2.dist-info}/entry_points.txt +0 -0
langgraph_runtime/__init__.py
ADDED

```diff
@@ -0,0 +1,39 @@
+import importlib.util
+import os
+import sys
+
+import structlog
+
+logger = structlog.stdlib.get_logger(__name__)
+
+try:
+    RUNTIME_EDITION = os.environ["LANGGRAPH_RUNTIME_EDITION"]
+    RUNTIME_PACKAGE = f"langgraph_runtime_{RUNTIME_EDITION}"
+except KeyError:
+    raise ValueError(
+        "LANGGRAPH_RUNTIME_EDITION environment variable is not set."
+        " Expected LANGGRAPH_RUNTIME_EDITION to be set to one of:\n"
+        " - inmem\n"
+        " - postgres\n"
+        " - community\n"
+    ) from None
+if importlib.util.find_spec(RUNTIME_PACKAGE):
+    backend = importlib.import_module(RUNTIME_PACKAGE)
+    logger.info(f"Using {RUNTIME_PACKAGE}")
+else:
+    raise ImportError(
+        "Langgraph runtime backend not found. Please install with "
+        f'`pip install "langgraph-runtime[{RUNTIME_EDITION}"`'
+    ) from None
+
+# All runtime backends share the same API
+for module_name in (
+    "checkpoint",
+    "database",
+    "lifespan",
+    "ops",
+    "retry",
+    "store",
+    "metrics",
+):
+    sys.modules["langgraph_runtime." + module_name] = getattr(backend, module_name)
```
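The new langgraph_runtime facade resolves a concrete backend package from LANGGRAPH_RUNTIME_EDITION at import time, then aliases that backend's submodules into sys.modules so that imports like `langgraph_runtime.checkpoint` resolve to whichever edition is installed. A minimal, self-contained sketch of that sys.modules aliasing trick, using the stdlib json module as a stand-in backend submodule and a hypothetical facade name ("myruntime" is illustrative, not a real package):

```python
import json
import sys
import types

# Create an empty facade package and register it under a stable name.
facade = types.ModuleType("myruntime")
sys.modules["myruntime"] = facade

# Alias a concrete backend module under the facade path; here the stdlib
# json module plays the role of a backend's "codec" submodule.
sys.modules["myruntime.codec"] = json

# Imports through the facade now resolve to the backend module: since
# Python 3.7, "from pkg import name" falls back to sys.modules["pkg.name"]
# when the attribute is missing on the parent package.
from myruntime import codec

assert codec.loads("[1, 2]") == [1, 2]
```

The payoff of this pattern is that callers keep one stable import path while the concrete implementation is chosen once, at process startup.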
langgraph_api/lifespan.py
DELETED
```diff
@@ -1,74 +0,0 @@
-import asyncio
-from contextlib import asynccontextmanager
-
-import structlog
-from langchain_core.runnables.config import var_child_runnable_config
-from langgraph.constants import CONF, CONFIG_KEY_STORE
-from starlette.applications import Starlette
-
-import langgraph_api.config as config
-from langgraph_api.asyncio import SimpleTaskGroup, set_event_loop
-from langgraph_api.cron_scheduler import cron_scheduler
-from langgraph_api.graph import collect_graphs_from_env, stop_remote_graphs
-from langgraph_api.http import start_http_client, stop_http_client
-from langgraph_api.js.ui import start_ui_bundler, stop_ui_bundler
-from langgraph_api.metadata import metadata_loop
-from langgraph_api.thread_ttl import thread_ttl_sweep_loop
-from langgraph_license.validation import get_license_status, plus_features_enabled
-from langgraph_storage.database import start_pool, stop_pool
-from langgraph_storage.queue import queue
-from langgraph_storage.store import Store
-
-logger = structlog.stdlib.get_logger(__name__)
-
-
-@asynccontextmanager
-async def lifespan(
-    app: Starlette | None = None,
-    with_cron_scheduler: bool = True,
-    taskset: set[asyncio.Task] | None = None,
-):
-    try:
-        current_loop = asyncio.get_running_loop()
-        set_event_loop(current_loop)
-    except RuntimeError:
-        await logger.aerror("Failed to set loop")
-
-    if not await get_license_status():
-        raise ValueError(
-            "License verification failed. Please ensure proper configuration:\n"
-            "- For local development, set a valid LANGSMITH_API_KEY for an account with LangGraph Cloud access "
-            "in the environment defined in your langgraph.json file.\n"
-            "- For production, configure the LANGGRAPH_CLOUD_LICENSE_KEY environment variable "
-            "with your LangGraph Cloud license key.\n"
-            "Review your configuration settings and try again. If issues persist, "
-            "contact support for assistance."
-        )
-    await start_http_client()
-    await start_pool()
-    await collect_graphs_from_env(True)
-    await start_ui_bundler()
-    try:
-        async with SimpleTaskGroup(
-            cancel=True, taskset=taskset, taskgroup_name="Lifespan"
-        ) as tg:
-            tg.create_task(metadata_loop())
-            if config.N_JOBS_PER_WORKER > 0:
-                tg.create_task(queue())
-            if (
-                with_cron_scheduler
-                and config.FF_CRONS_ENABLED
-                and plus_features_enabled()
-            ):
-                tg.create_task(cron_scheduler())
-            store = Store()
-            tg.create_task(Store().start_ttl_sweeper())
-            tg.create_task(thread_ttl_sweep_loop())
-            var_child_runnable_config.set({CONF: {CONFIG_KEY_STORE: store}})
-
-            yield
-    finally:
-        await stop_ui_bundler()
-        await stop_remote_graphs()
-        await stop_http_client()
-        await stop_pool()
```
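The deleted module implemented the server's startup/shutdown sequence as an async context manager handed to Starlette; in 0.1.x this responsibility moves into the pluggable langgraph_runtime packages. A minimal sketch of the same lifespan pattern, assuming only that starlette is installed; heartbeat_loop is an illustrative stand-in for the real background loops (metadata, run queue, cron scheduler, TTL sweeps):

```python
import asyncio
from contextlib import asynccontextmanager

from starlette.applications import Starlette


async def heartbeat_loop() -> None:
    # Illustrative stand-in for long-running background work.
    while True:
        await asyncio.sleep(60)


@asynccontextmanager
async def lifespan(app: Starlette):
    # Startup: launch background work before the server accepts requests.
    background = asyncio.create_task(heartbeat_loop())
    try:
        yield  # the server handles requests while we are suspended here
    finally:
        # Shutdown: cancel background work and drain it.
        background.cancel()
        try:
            await background
        except asyncio.CancelledError:
            pass


app = Starlette(lifespan=lifespan)
```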
langgraph_storage/checkpoint.py
DELETED
```diff
@@ -1,123 +0,0 @@
-import logging
-import os
-import uuid
-
-from langchain_core.runnables import RunnableConfig
-from langgraph.checkpoint.base import (
-    Checkpoint,
-    CheckpointMetadata,
-    CheckpointTuple,
-    SerializerProtocol,
-)
-from langgraph.checkpoint.memory import MemorySaver, PersistentDict
-
-from langgraph_api.serde import Serializer
-
-logger = logging.getLogger(__name__)
-
-_EXCLUDED_KEYS = {"checkpoint_ns", "checkpoint_id", "run_id", "thread_id"}
-DISABLE_FILE_PERSISTENCE = (
-    os.getenv("LANGGRAPH_DISABLE_FILE_PERSISTENCE", "false").lower() == "true"
-)
-
-
-class InMemorySaver(MemorySaver):
-    def __init__(
-        self,
-        *,
-        serde: SerializerProtocol | None = None,
-    ) -> None:
-        self.filename = os.path.join(".langgraph_api", ".langgraph_checkpoint.")
-        i = 0
-
-        def factory(*args):
-            nonlocal i
-            i += 1
-
-            thisfname = self.filename + str(i) + ".pckl"
-            d = PersistentDict(*args, filename=thisfname)
-            if not os.path.exists(".langgraph_api"):
-                os.mkdir(".langgraph_api")
-            try:
-                d.load()
-            except FileNotFoundError:
-                pass
-            except ModuleNotFoundError:
-                logger.error(
-                    "Unable to load cached data - your code has changed in a way that's incompatible with the cache."
-                    "\nThis usually happens when you've:"
-                    "\n - Renamed or moved classes"
-                    "\n - Changed class structures"
-                    "\n - Pulled updates that modified class definitions in a way that's incompatible with the cache"
-                    "\n\nRemoving invalid cache data stored at path: .langgraph_api"
-                )
-                os.remove(self.filename)
-            except Exception as e:
-                logger.error("Failed to load cached data: %s", str(e))
-                os.remove(self.filename)
-            return d
-
-        super().__init__(
-            serde=serde if serde is not None else Serializer(),
-            factory=factory if not DISABLE_FILE_PERSISTENCE else None,
-        )
-
-    def put(
-        self,
-        config: RunnableConfig,
-        checkpoint: Checkpoint,
-        metadata: CheckpointMetadata,
-        new_versions: dict[str, str | int | float],
-    ) -> RunnableConfig:
-        # TODO: Should this be done in OSS as well?
-        metadata = {
-            **{
-                k: v
-                for k, v in config["configurable"].items()
-                if not k.startswith("__") and k not in _EXCLUDED_KEYS
-            },
-            **config.get("metadata", {}),
-            **metadata,
-        }
-        if not isinstance(checkpoint["id"], uuid.UUID):
-            # Avoid type inconsistencies
-            checkpoint = checkpoint.copy()
-            checkpoint["id"] = str(checkpoint["id"])
-        return super().put(config, checkpoint, metadata, new_versions)
-
-    def get_tuple(self, config: RunnableConfig) -> CheckpointTuple | None:
-        if isinstance(config["configurable"].get("checkpoint_id"), uuid.UUID):
-            # Avoid type inconsistencies....
-            config = config.copy()
-
-            config["configurable"] = {
-                **config["configurable"],
-                "checkpoint_id": str(config["configurable"]["checkpoint_id"]),
-            }
-        return super().get_tuple(config)
-
-    def clear(self):
-        self.storage.clear()
-        self.writes.clear()
-        for suffix in ["1", "2"]:
-            file_path = f"{self.filename}{suffix}.pckl"
-            if os.path.exists(file_path):
-                os.remove(file_path)
-
-
-MEMORY = InMemorySaver()
-
-
-def Checkpointer(*args, unpack_hook=None, **kwargs):
-    if unpack_hook is not None:
-        saver = InMemorySaver(
-            serde=Serializer(__unpack_ext_hook__=unpack_hook), **kwargs
-        )
-        saver.writes = MEMORY.writes
-        saver.blobs = MEMORY.blobs
-        saver.storage = MEMORY.storage
-        return saver
-    return MEMORY
-
-
-__all__ = ["Checkpointer"]
```
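The core idea behind the deleted InMemorySaver is an in-memory mapping that survives restarts by pickling itself to files under .langgraph_api/, and that discards a cache it can no longer unpickle (for example after classes were renamed) instead of failing startup. A simplified, runnable stand-in for that mechanism; FileBackedDict and its file name are illustrative, not the real PersistentDict API:

```python
import os
import pickle


class FileBackedDict(dict):
    """A dict that can be flushed to and reloaded from a pickle file."""

    def __init__(self, filename: str) -> None:
        super().__init__()
        self.filename = filename

    def load(self) -> None:
        try:
            with open(self.filename, "rb") as f:
                self.update(pickle.load(f))
        except FileNotFoundError:
            pass  # first run: nothing cached yet
        except Exception:
            # Incompatible or corrupt cache: drop it rather than crash.
            os.remove(self.filename)

    def sync(self) -> None:
        with open(self.filename, "wb") as f:
            pickle.dump(dict(self), f)


# Usage: state accumulates across process restarts.
d = FileBackedDict(".demo_cache.pckl")
d.load()
d["runs"] = d.get("runs", 0) + 1
d.sync()
```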
langgraph_storage/database.py
DELETED
```diff
@@ -1,200 +0,0 @@
-import asyncio
-import os
-import uuid
-from collections import defaultdict
-from collections.abc import AsyncIterator
-from contextlib import asynccontextmanager
-from datetime import datetime
-from typing import Any, NotRequired, TypedDict
-from uuid import UUID
-
-import structlog
-from langgraph.checkpoint.memory import PersistentDict
-
-from langgraph_api import config as langgraph_config
-from langgraph_api.utils import AsyncConnectionProto
-from langgraph_storage import store
-from langgraph_storage.inmem_stream import start_stream, stop_stream
-
-logger = structlog.stdlib.get_logger(__name__)
-
-
-class Assistant(TypedDict):
-    assistant_id: UUID
-    graph_id: str
-    name: str
-    description: str | None
-    created_at: NotRequired[datetime]
-    updated_at: NotRequired[datetime]
-    config: dict[str, Any]
-    metadata: dict[str, Any]
-
-
-class Thread(TypedDict):
-    thread_id: UUID
-    created_at: NotRequired[datetime]
-    updated_at: NotRequired[datetime]
-    metadata: dict[str, Any]
-    status: str
-
-
-class Run(TypedDict):
-    run_id: UUID
-    thread_id: UUID
-    assistant_id: UUID
-    created_at: NotRequired[datetime]
-    updated_at: NotRequired[datetime]
-    metadata: dict[str, Any]
-    status: str
-
-
-class RunEvent(TypedDict):
-    event_id: UUID
-    run_id: UUID
-    received_at: NotRequired[datetime]
-    span_id: UUID
-    event: str
-    name: str
-    tags: list[Any]
-    data: dict[str, Any]
-    metadata: dict[str, Any]
-
-
-class AssistantVersion(TypedDict):
-    assistant_id: UUID
-    version: int
-    graph_id: str
-    config: dict[str, Any]
-    metadata: dict[str, Any]
-    created_at: NotRequired[datetime]
-    name: str
-
-
-class GlobalStore(PersistentDict):
-    def __init__(self, *args: Any, filename: str, **kwargs: Any) -> None:
-        super().__init__(*args, filename=filename, **kwargs)
-        self.clear()
-
-    def clear(self):
-        assistants = self.get("assistants", [])
-        super().clear()
-        self["runs"] = []
-        self["threads"] = []
-        self["assistants"] = [
-            a for a in assistants if a["metadata"].get("created_by") == "system"
-        ]
-        self["assistant_versions"] = []
-
-
-OPS_FILENAME = os.path.join(".langgraph_api", ".langgraph_ops.pckl")
-RETRY_COUNTER_FILENAME = os.path.join(".langgraph_api", ".langgraph_retry_counter.pckl")
-
-
-class InMemoryRetryCounter:
-    def __init__(self):
-        self._counters: dict[uuid.UUID, int] = PersistentDict(
-            int, filename=RETRY_COUNTER_FILENAME
-        )
-        self._locks: dict[uuid.UUID, asyncio.Lock] = defaultdict(asyncio.Lock)
-
-    async def increment(self, run_id: uuid.UUID) -> int:
-        async with self._locks[run_id]:
-            self._counters[run_id] += 1
-            return self._counters[run_id]
-
-    def close(self):
-        self._counters.close()
-
-
-# Global retry counter for in-memory implementation
-GLOBAL_RETRY_COUNTER = InMemoryRetryCounter()
-GLOBAL_STORE = GlobalStore(filename=OPS_FILENAME)
-
-
-class InMemConnectionProto:
-    def __init__(self):
-        self.filename = OPS_FILENAME
-        self.store = GLOBAL_STORE
-        self.retry_counter = GLOBAL_RETRY_COUNTER
-        self.can_execute = False
-
-    @asynccontextmanager
-    async def pipeline(self):
-        yield None
-
-    async def execute(self, query: str, *args, **kwargs):
-        return None
-
-    def clear(self):
-        self.store.clear()
-        keys = list(self.retry_counter._counters)
-        for key in keys:
-            del self.retry_counter._counters[key]
-        keys = list(self.retry_counter._locks)
-        for key in keys:
-            del self.retry_counter._locks[key]
-        if os.path.exists(self.filename):
-            os.remove(self.filename)
-
-
-@asynccontextmanager
-async def connect(*, __test__: bool = False) -> AsyncIterator[AsyncConnectionProto]:
-    yield InMemConnectionProto()
-
-
-async def start_pool() -> None:
-    if store._STORE_CONFIG is None:
-        if langgraph_config.STORE_CONFIG:
-            config_ = langgraph_config.STORE_CONFIG
-            store.set_store_config(config_)
-
-    if not os.path.exists(".langgraph_api"):
-        os.mkdir(".langgraph_api")
-    if os.path.exists(OPS_FILENAME):
-        try:
-            GLOBAL_STORE.load()
-        except ModuleNotFoundError:
-            logger.error(
-                "Unable to load cached data - your code has changed in a way that's incompatible with the cache."
-                "\nThis usually happens when you've:"
-                "\n - Renamed or moved classes"
-                "\n - Changed class structures"
-                "\n - Pulled updates that modified class definitions in a way that's incompatible with the cache"
-                "\n\nRemoving invalid cache data stored at path: .langgraph_api"
-            )
-            await asyncio.to_thread(os.remove, OPS_FILENAME)
-            await asyncio.to_thread(os.remove, RETRY_COUNTER_FILENAME)
-        except Exception as e:
-            logger.error("Failed to load cached data: %s", str(e))
-            await asyncio.to_thread(os.remove, OPS_FILENAME)
-            await asyncio.to_thread(os.remove, RETRY_COUNTER_FILENAME)
-    for k in ["runs", "threads", "assistants", "assistant_versions"]:
-        if not GLOBAL_STORE.get(k):
-            GLOBAL_STORE[k] = []
-    for k in ["crons"]:
-        if not GLOBAL_STORE.get(k):
-            GLOBAL_STORE[k] = {}
-    await start_stream()
-
-
-async def stop_pool() -> None:
-    await asyncio.to_thread(GLOBAL_STORE.close)
-    await asyncio.to_thread(GLOBAL_RETRY_COUNTER.close)
-    from langgraph_storage.checkpoint import Checkpointer
-    from langgraph_storage.store import STORE
-
-    await asyncio.to_thread(STORE.close)
-
-    async with Checkpointer():
-        pass
-    await stop_stream()
-
-
-async def healthcheck() -> None:
-    # What could possibly go wrong?
-    pass
-
-
-def pool_stats() -> dict[str, dict[str, int]]:
-    # TODO??
-    return {}
```
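InMemConnectionProto above is a stub that satisfies the async connection interface (pipeline(), execute()) while all state actually lives in process-local dicts, so ops code written against a real database connection runs unchanged. A runnable sketch of that pattern with illustrative names (InMemConnection, connect), not the real langgraph_storage API:

```python
import asyncio
from contextlib import asynccontextmanager


class InMemConnection:
    """Stub satisfying an async DB-connection interface with no-ops."""

    @asynccontextmanager
    async def pipeline(self):
        # No batching is needed when state is process-local.
        yield None

    async def execute(self, query: str, *args, **kwargs):
        # SQL is meaningless here; state lives in Python dicts.
        return None


@asynccontextmanager
async def connect():
    # Hands out the stub; a real backend would check out a pooled connection.
    yield InMemConnection()


async def main() -> None:
    async with connect() as conn:
        async with conn.pipeline():
            await conn.execute("SELECT 1")


asyncio.run(main())
```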
langgraph_storage/inmem_stream.py
DELETED

```diff
@@ -1,109 +0,0 @@
-import asyncio
-import logging
-from collections import defaultdict
-from dataclasses import dataclass
-from uuid import UUID
-
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class Message:
-    topic: bytes
-    data: bytes
-
-
-class ContextQueue(asyncio.Queue):
-    """Queue that supports async context manager protocol"""
-
-    async def __aenter__(self):
-        return self
-
-    async def __aexit__(
-        self,
-        exc_type: type[BaseException] | None,
-        exc_val: BaseException | None,
-        exc_tb: object | None,
-    ) -> None:
-        # Clear the queue
-        while not self.empty():
-            try:
-                self.get_nowait()
-            except asyncio.QueueEmpty:
-                break
-
-
-class StreamManager:
-    def __init__(self):
-        self.queues = defaultdict(list)  # Dict[UUID, List[asyncio.Queue]]
-        self.control_queues = defaultdict(list)
-
-    def get_queues(self, run_id: UUID) -> list[asyncio.Queue]:
-        return self.queues[run_id]
-
-    async def put(self, run_id: UUID, message: Message) -> None:
-        topic = message.topic.decode()
-        if "control" in topic:
-            self.control_queues[run_id].append(message)
-        queues = self.queues.get(run_id, [])
-        coros = [queue.put(message) for queue in queues]
-        results = await asyncio.gather(*coros, return_exceptions=True)
-        for result in results:
-            if isinstance(result, Exception):
-                logger.exception(f"Failed to put message in queue: {result}")
-
-    async def add_queue(self, run_id: UUID) -> asyncio.Queue:
-        queue = ContextQueue()
-        self.queues[run_id].append(queue)
-        for control_msg in self.control_queues[run_id]:
-            try:
-                await queue.put(control_msg)
-            except Exception:
-                logger.exception(
-                    f"Failed to put control message in queue: {control_msg}"
-                )
-
-        return queue
-
-    async def remove_queue(self, run_id: UUID, queue: asyncio.Queue):
-        if run_id in self.queues:
-            self.queues[run_id].remove(queue)
-            if not self.queues[run_id]:
-                del self.queues[run_id]
-
-
-# Global instance
-stream_manager = StreamManager()
-
-
-async def start_stream() -> None:
-    """Initialize the queue system.
-    In this in-memory implementation, we just need to ensure we have a clean StreamManager instance.
-    """
-    global stream_manager
-    stream_manager = StreamManager()
-
-
-async def stop_stream() -> None:
-    """Clean up the queue system.
-    Clear all queues and stored control messages."""
-    global stream_manager
-
-    # Send 'done' message to all active queues before clearing
-    for run_id in list(stream_manager.queues.keys()):
-        control_message = Message(topic=f"run:{run_id}:control".encode(), data=b"done")
-
-        for queue in stream_manager.queues[run_id]:
-            try:
-                await queue.put(control_message)
-            except (Exception, RuntimeError):
-                pass  # Ignore errors during shutdown
-
-    # Clear all stored data
-    stream_manager.queues.clear()
-    stream_manager.control_queues.clear()
-
-
-def get_stream_manager() -> StreamManager:
-    """Get the global stream manager instance."""
-    return stream_manager
```
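The deleted StreamManager implements per-run fan-out: every subscriber to a run gets its own asyncio.Queue, and put() delivers each message to all of them, with control messages additionally replayed to late subscribers. A minimal sketch of the fan-out core under simplified assumptions (string run ids, no control-message replay); FanOut is an illustrative name, not the real class:

```python
import asyncio
from collections import defaultdict


class FanOut:
    def __init__(self) -> None:
        self.queues: dict[str, list[asyncio.Queue]] = defaultdict(list)

    def subscribe(self, run_id: str) -> asyncio.Queue:
        # Each subscriber gets a private queue for this run.
        q: asyncio.Queue = asyncio.Queue()
        self.queues[run_id].append(q)
        return q

    async def publish(self, run_id: str, message: bytes) -> None:
        # Deliver the message to every queue registered for the run.
        await asyncio.gather(*(q.put(message) for q in self.queues[run_id]))


async def main() -> None:
    hub = FanOut()
    a, b = hub.subscribe("run-1"), hub.subscribe("run-1")
    await hub.publish("run-1", b"chunk")
    assert await a.get() == await b.get() == b"chunk"


asyncio.run(main())
```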