llama-deploy-appserver 0.3.0a23__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_deploy/appserver/app.py +12 -16
- llama_deploy/appserver/deployment.py +73 -3
- llama_deploy/appserver/settings.py +30 -0
- llama_deploy/appserver/workflow_store/agent_data_store.py +100 -0
- llama_deploy/appserver/workflow_store/keyed_lock.py +32 -0
- llama_deploy/appserver/workflow_store/lru_cache.py +49 -0
- {llama_deploy_appserver-0.3.0a23.dist-info → llama_deploy_appserver-0.3.1.dist-info}/METADATA +6 -5
- {llama_deploy_appserver-0.3.0a23.dist-info → llama_deploy_appserver-0.3.1.dist-info}/RECORD +9 -6
- {llama_deploy_appserver-0.3.0a23.dist-info → llama_deploy_appserver-0.3.1.dist-info}/WHEEL +0 -0
llama_deploy/appserver/app.py
CHANGED
```diff
@@ -7,7 +7,7 @@ import webbrowser
 from contextlib import asynccontextmanager
 from importlib.metadata import version
 from pathlib import Path
-from typing import Any, AsyncGenerator, cast
+from typing import Any, AsyncGenerator, Literal, cast
 
 import uvicorn
 from fastapi import FastAPI
@@ -40,7 +40,7 @@ from llama_deploy.appserver.workflow_loader import (
 )
 from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
 from prometheus_fastapi_instrumentator import Instrumentator
-from starlette.routing import Route
+from starlette.applications import Starlette
 from workflows.server import WorkflowServer
 
 from .deployment import Deployment
@@ -66,19 +66,7 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None, Any]:
     deployment = Deployment(workflows)
     base_router = create_base_router(config.name)
     deploy_router = create_deployments_router(config.name, deployment)
-    server = deployment.create_workflow_server()
-
-    for route in server.app.routes:
-        # add routes directly rather than mounting, so that we can share a root
-        if isinstance(route, Route):
-            app.add_api_route(
-                f"/deployments/{config.name}{route.path}",
-                route.endpoint,
-                name=f"{config.name}_{route.name}",
-                methods=route.methods,
-                include_in_schema=True,  # change to false when schemas are added to workflow server
-                tags=["workflows"],
-            )
+    server = deployment.mount_workflow_server(app)
 
     app.include_router(base_router)
     app.include_router(deploy_router)
@@ -106,7 +94,9 @@
         return RedirectResponse(f"/deployments/{config.name}/docs")
 
     apiserver_state.state("running")
-    yield
+    # terrible sad cludge
+    async with server._lifespan(cast(Starlette, {})):
+        yield
 
     apiserver_state.state("stopped")
 
@@ -242,6 +232,9 @@ def start_server_in_target_venv(
     ui_port: int | None = None,
     log_level: str | None = None,
     log_format: str | None = None,
+    persistence: Literal["memory", "local", "cloud"] | None = None,
+    local_persistence_path: str | None = None,
+    cloud_persistence_name: str | None = None,
 ) -> None:
     # Ensure settings reflect the intended working directory before computing paths
 
@@ -250,6 +243,9 @@ def start_server_in_target_venv(
         deployment_file_path=deployment_file,
         reload=reload,
         proxy_ui=proxy_ui,
+        persistence=persistence,
+        local_persistence_path=local_persistence_path,
+        cloud_persistence_name=cloud_persistence_name,
     )
     base_dir = cwd or Path.cwd()
     path = settings.resolved_config_parent.relative_to(base_dir)
```
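The practical effect for callers is that `start_server_in_target_venv` now forwards three persistence options into `configure_settings`. A minimal sketch of opting into local sqlite persistence; the `deployment_file` value is a placeholder and every parameter not shown keeps its default:

```python
from pathlib import Path

from llama_deploy.appserver.app import start_server_in_target_venv

# Hypothetical invocation: "deployment.yml" stands in for a real deployment file.
start_server_in_target_venv(
    deployment_file=Path("deployment.yml"),
    persistence="local",  # new in 0.3.1: "memory", "local", or "cloud"
    local_persistence_path="state/workflows.db",  # falls back to "workflows.db"
)
```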
llama_deploy/appserver/deployment.py
CHANGED
```diff
@@ -1,13 +1,24 @@
 import asyncio
 import json
 import logging
+import os
+from pathlib import Path
 from typing import Any, Tuple
 
+from fastapi import FastAPI
+from fastapi.responses import RedirectResponse
+from llama_deploy.appserver.deployment_config_parser import get_deployment_config
+from llama_deploy.appserver.settings import ApiserverSettings, settings
 from llama_deploy.appserver.types import generate_id
 from llama_deploy.appserver.workflow_loader import DEFAULT_SERVICE_ID
+from llama_deploy.appserver.workflow_store.agent_data_store import AgentDataStore
+from llama_deploy.core.deployment_config import DeploymentConfig
+from starlette.routing import Route
+from starlette.staticfiles import StaticFiles
 from workflows import Context, Workflow
 from workflows.handler import WorkflowHandler
-from workflows.server import WorkflowServer
+from workflows.server import SqliteWorkflowStore, WorkflowServer
+from workflows.server.abstract_workflow_store import EmptyWorkflowStore
 
 logger = logging.getLogger()
 
@@ -80,8 +91,67 @@ class Deployment:
         self._handler_inputs[handler_id] = json.dumps(run_kwargs)
         return handler_id, session_id
 
-    def create_workflow_server(self) -> WorkflowServer:
-        server = WorkflowServer()
+    def create_workflow_server(
+        self, deployment_config: DeploymentConfig, settings: ApiserverSettings
+    ) -> WorkflowServer:
+        persistence = EmptyWorkflowStore()
+        if settings.persistence == "local":
+            logger.info("Using local sqlite persistence for workflows")
+            persistence = SqliteWorkflowStore(
+                settings.local_persistence_path or "workflows.db"
+            )
+        elif settings.persistence == "cloud" or (
+            # default to cloud if api key is present to use
+            settings.persistence is None and os.getenv("LLAMA_CLOUD_API_KEY")
+        ):
+            logger.info("Using agent data cloud persistence for workflows")
+            persistence = AgentDataStore(deployment_config, settings)
+        else:
+            logger.info("Not persisting workflows")
+        server = WorkflowServer(workflow_store=persistence)
         for service_id, workflow in self._workflow_services.items():
             server.add_workflow(service_id, workflow)
         return server
+
+    def mount_workflow_server(self, app: FastAPI) -> WorkflowServer:
+        config = get_deployment_config()
+        server = self.create_workflow_server(config, settings)
+
+        for route in server.app.routes:
+            # add routes directly rather than mounting, so that we can share a root (only one ASGI app can be mounted at a path)
+            if isinstance(route, Route):
+                logger.info(f"Adding route {route.path} to app")
+                app.add_api_route(
+                    f"/deployments/{config.name}{route.path}",
+                    route.endpoint,
+                    name=f"{config.name}_{route.name}",
+                    methods=route.methods,
+                    include_in_schema=True,  # change to false when schemas are added to workflow server
+                    tags=["workflows"],
+                )
+                # kludge, temporarily make it accessible to the debugger, which hard codes
+                app.add_api_route(
+                    f"{route.path}",
+                    route.endpoint,
+                    name=f"_kludge_{config.name}_{route.name}",
+                    methods=route.methods,
+                    include_in_schema=False,
+                )
+
+        # be defensive since this is external and private
+        server_debugger = getattr(server, "_assets_path", None)
+        if isinstance(server_debugger, Path):
+
+            @app.get(f"/deployments/{config.name}/debugger", include_in_schema=False)
+            @app.get(f"/deployments/{config.name}/debugger/", include_in_schema=False)
+            def redirect_to_debugger() -> RedirectResponse:
+                return RedirectResponse(
+                    f"/deployments/{config.name}/debugger/index.html"
+                )
+
+            app.mount(
+                f"/deployments/{config.name}/debugger",
+                StaticFiles(directory=server_debugger),
+                name=f"debugger-{config.name}",
+            )
+        return server
```
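Store selection in `create_workflow_server` follows a fixed order: an explicit `local` setting uses sqlite, an explicit `cloud` setting (or no setting at all while `LLAMA_CLOUD_API_KEY` is present) uses Agent Data, and everything else keeps handlers in memory only. The same branch logic restated as a standalone sketch:

```python
import os

def resolve_store_kind(persistence: str | None) -> str:
    """Mirrors the branch order in Deployment.create_workflow_server."""
    if persistence == "local":
        return "SqliteWorkflowStore"
    if persistence == "cloud" or (
        persistence is None and os.getenv("LLAMA_CLOUD_API_KEY")
    ):
        return "AgentDataStore"
    return "EmptyWorkflowStore"  # "memory" or anything else: nothing persisted

# With no explicit mode, the presence of the API key decides:
os.environ.pop("LLAMA_CLOUD_API_KEY", None)
assert resolve_store_kind(None) == "EmptyWorkflowStore"
os.environ["LLAMA_CLOUD_API_KEY"] = "llx-placeholder"
assert resolve_store_kind(None) == "AgentDataStore"
```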
llama_deploy/appserver/settings.py
CHANGED
```diff
@@ -1,5 +1,6 @@
 import os
 from pathlib import Path
+from typing import Literal
 
 from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
 from llama_deploy.core.deployment_config import resolve_config_parent
@@ -73,6 +74,19 @@ class ApiserverSettings(BaseSettings):
         description="If true, reload the workflow modules, for use in a dev server environment",
     )
 
+    persistence: Literal["memory", "local", "cloud"] | None = Field(
+        default=None,
+        description="The persistence mode to use for the workflow server",
+    )
+    local_persistence_path: str | None = Field(
+        default=None,
+        description="The path to the sqlite database to use for the workflow server",
+    )
+    cloud_persistence_name: str | None = Field(
+        default=None,
+        description="Agent Data deployment name to use for workflow persistence. May optionally include a `:` delimited collection name, e.g. 'my_agent:my_collection'. Leave none to use the current deployment name. Recommended to override with _public if running locally, and specify a collection name",
+    )
+
     @property
     def resolved_config_parent(self) -> Path:
         return resolve_config_parent(self.app_root, self.deployment_file_path)
@@ -86,6 +100,9 @@ def configure_settings(
     deployment_file_path: Path | None = None,
     app_root: Path | None = None,
     reload: bool | None = None,
+    persistence: Literal["memory", "local", "cloud"] | None = None,
+    local_persistence_path: str | None = None,
+    cloud_persistence_name: str | None = None,
 ) -> None:
     if proxy_ui is not None:
         settings.proxy_ui = proxy_ui
@@ -101,3 +118,16 @@
     if reload is not None:
         settings.reload = reload
         os.environ["LLAMA_DEPLOY_APISERVER_RELOAD"] = "true" if reload else "false"
+    if persistence is not None:
+        settings.persistence = persistence
+        os.environ["LLAMA_DEPLOY_APISERVER_PERSISTENCE"] = persistence
+    if local_persistence_path is not None:
+        settings.local_persistence_path = local_persistence_path
+        os.environ["LLAMA_DEPLOY_APISERVER_LOCAL_PERSISTENCE_PATH"] = (
+            local_persistence_path
+        )
+    if cloud_persistence_name is not None:
+        settings.cloud_persistence_name = cloud_persistence_name
+        os.environ["LLAMA_DEPLOY_APISERVER_CLOUD_PERSISTENCE_NAME"] = (
+            cloud_persistence_name
+        )
```
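Because `configure_settings` mirrors each option into a `LLAMA_DEPLOY_APISERVER_*` environment variable (presumably so the server process launched in the target venv inherits it), the new fields can also be set directly from the environment. A sketch using the exact variable names written above:

```python
import os

# Equivalent of configure_settings(persistence="local", local_persistence_path=...):
os.environ["LLAMA_DEPLOY_APISERVER_PERSISTENCE"] = "local"
os.environ["LLAMA_DEPLOY_APISERVER_LOCAL_PERSISTENCE_PATH"] = "state/workflows.db"

# Cloud persistence against a named Agent Data deployment and collection,
# using the "name:collection" form from the cloud_persistence_name description:
os.environ["LLAMA_DEPLOY_APISERVER_CLOUD_PERSISTENCE_NAME"] = "my_agent:my_collection"
```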
llama_deploy/appserver/workflow_store/agent_data_store.py
ADDED
```diff
@@ -0,0 +1,100 @@
+import logging
+import os
+from typing import List
+
+from llama_cloud.client import AsyncLlamaCloud, httpx
+from llama_cloud_services.beta.agent_data import AsyncAgentDataClient
+from llama_deploy.appserver.settings import ApiserverSettings
+from llama_deploy.core.deployment_config import DeploymentConfig
+from typing_extensions import override
+from workflows.server import AbstractWorkflowStore, HandlerQuery, PersistentHandler
+
+from .keyed_lock import AsyncKeyedLock
+from .lru_cache import LRUCache
+
+logger = logging.getLogger(__name__)
+
+
+class AgentDataStore(AbstractWorkflowStore):
+    def __init__(
+        self, settings: DeploymentConfig, server_settings: ApiserverSettings
+    ) -> None:
+        agent_url_id: str | None = server_settings.cloud_persistence_name
+        collection = "workflow_contexts"
+        if agent_url_id is not None:
+            parts = agent_url_id.split(":")
+            if len(parts) > 1:
+                collection = parts[1]
+            agent_url_id = parts[0]
+        else:
+            agent_url_id = settings.name
+
+        self.settings = settings
+        project_id = os.getenv("LLAMA_DEPLOY_PROJECT_ID")
+        self.client = AsyncAgentDataClient(
+            type=PersistentHandler,
+            collection=collection,
+            agent_url_id=agent_url_id,
+            client=AsyncLlamaCloud(
+                base_url=os.getenv("LLAMA_CLOUD_BASE_URL"),
+                token=os.getenv("LLAMA_CLOUD_API_KEY"),
+                httpx_client=httpx.AsyncClient(
+                    headers={"Project-Id": project_id} if project_id else None,
+                ),
+            ),
+        )
+        self.lock = AsyncKeyedLock()
+        # workflow id -> agent data id
+        self.cache = LRUCache[str, str](maxsize=1024)
+
+    @override
+    async def query(self, query: HandlerQuery) -> List[PersistentHandler]:
+        filters = {}
+        if query.handler_id_in is not None:
+            filters["handler_id"] = {
+                "includes": query.handler_id_in,
+            }
+        if query.workflow_name_in is not None:
+            filters["workflow_name"] = {
+                "includes": query.workflow_name_in,
+            }
+        if query.status_in is not None:
+            filters["status"] = {
+                "includes": query.status_in,
+            }
+        results = await self.client.search(
+            filter=filters,
+            page_size=1000,
+        )
+        return [x.data for x in results.items]
+
+    @override
+    async def update(self, handler: PersistentHandler) -> None:
+        async with self.lock.acquire(handler.handler_id):
+            id = await self._get_item_id(handler)
+            if id is None:
+                item = await self.client.create_item(
+                    data=handler,
+                )
+                if item.id is None:
+                    raise ValueError(f"Failed to create handler {handler.handler_id}")
+                self.cache.set(handler.handler_id, item.id)
+            else:
+                await self.client.update_item(
+                    item_id=id,
+                    data=handler,
+                )
+
+    async def _get_item_id(self, handler: PersistentHandler) -> str | None:
+        cached_id = self.cache.get(handler.handler_id, None)
+        if cached_id is not None:
+            return cached_id
+        results = await self.client.search(
+            filter={"handler_id": {"eq": handler.handler_id}},
+            page_size=1,
+        )
+        if not results.items:
+            return None
+        id = results.items[0].id
+        self.cache.set(handler.handler_id, id)
+        return id
```
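Two details of `AgentDataStore` are worth calling out: the constructor splits `cloud_persistence_name` into a `name:collection` pair (defaulting to the deployment name and a `workflow_contexts` collection), and `_get_item_id` memoizes the handler-id to item-id mapping in the LRU cache so repeated updates skip the search round trip. The name parsing reduces to this sketch:

```python
def split_persistence_name(name: str | None, deployment_name: str) -> tuple[str, str]:
    """Replicates AgentDataStore.__init__'s handling of cloud_persistence_name."""
    collection = "workflow_contexts"
    if name is None:
        return deployment_name, collection
    parts = name.split(":")
    if len(parts) > 1:
        collection = parts[1]
    return parts[0], collection

assert split_persistence_name(None, "my-deploy") == ("my-deploy", "workflow_contexts")
assert split_persistence_name("my_agent:my_collection", "ignored") == (
    "my_agent",
    "my_collection",
)
```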
llama_deploy/appserver/workflow_store/keyed_lock.py
ADDED
```diff
@@ -0,0 +1,32 @@
+import asyncio
+from collections import Counter
+from contextlib import asynccontextmanager
+
+
+class AsyncKeyedLock:
+    def __init__(self):
+        self._locks: dict[str, asyncio.Lock] = {}
+        self._refcnt = Counter()
+        self._registry_lock = asyncio.Lock()  # protects _locks/_refcnt
+
+    @asynccontextmanager
+    async def acquire(self, key: str):
+        async with self._registry_lock:
+            lock = self._locks.get(key)
+            if lock is None:
+                lock = asyncio.Lock()
+                self._locks[key] = lock
+            self._refcnt[key] += 1
+
+        try:
+            await lock.acquire()
+            try:
+                yield
+            finally:
+                lock.release()
+        finally:
+            async with self._registry_lock:
+                self._refcnt[key] -= 1
+                if self._refcnt[key] == 0:
+                    self._locks.pop(key, None)
+                    del self._refcnt[key]
```
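`AsyncKeyedLock` serializes coroutines that share a key while leaving other keys unblocked, and it drops a key's lock from the registry once the last holder or waiter releases it, so the dict does not grow without bound. A small usage sketch (the store uses it to serialize `update` calls per `handler_id`):

```python
import asyncio

from llama_deploy.appserver.workflow_store.keyed_lock import AsyncKeyedLock

async def main() -> None:
    locks = AsyncKeyedLock()
    order: list[str] = []

    async def job(key: str, tag: str) -> None:
        async with locks.acquire(key):
            order.append(f"{tag} start")
            await asyncio.sleep(0.01)
            order.append(f"{tag} end")

    # "a" and "b" share a key and run serially; "c" holds a different key.
    await asyncio.gather(job("h1", "a"), job("h1", "b"), job("h2", "c"))
    assert order.index("a end") < order.index("b start")

asyncio.run(main())
```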
llama_deploy/appserver/workflow_store/lru_cache.py
ADDED
```diff
@@ -0,0 +1,49 @@
+from collections import OrderedDict
+from typing import Generic, TypeVar, overload
+
+K = TypeVar("K")
+V = TypeVar("V")
+
+
+class LRUCache(Generic[K, V]):
+    def __init__(self, maxsize: int = 128):
+        self.maxsize = maxsize
+        self._store: OrderedDict[K, V] = OrderedDict()
+
+    @overload
+    def get(self, key: K) -> V | None: ...
+
+    @overload
+    def get(self, key: K, default: V) -> V: ...
+
+    def get(self, key: K, default: V | None = None) -> V | None:
+        if key not in self._store:
+            return default
+        # mark as recently used
+        value = self._store.pop(key)
+        self._store[key] = value
+        return value
+
+    def set(self, key: K, value: V):
+        if key in self._store:
+            # remove old so we can push to end
+            self._store.pop(key)
+        elif len(self._store) >= self.maxsize:
+            # evict least recently used (first item)
+            self._store.popitem(last=False)
+        self._store[key] = value
+
+    def __contains__(self, key: K) -> bool:
+        return key in self._store
+
+    def __getitem__(self, key: K) -> V:
+        return self.get(key)
+
+    def __setitem__(self, key: K, value: V):
+        self.set(key, value)
+
+    def __len__(self) -> int:
+        return len(self._store)
+
+    def __iter__(self):
+        return iter(self._store)
```
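`LRUCache` is a plain `OrderedDict`-backed map: `get` refreshes recency by popping and reinserting, `set` evicts the oldest entry once `maxsize` is reached, and `__getitem__` returns `None` for a missing key rather than raising. For example:

```python
from llama_deploy.appserver.workflow_store.lru_cache import LRUCache

cache = LRUCache[str, str](maxsize=2)
cache.set("a", "1")
cache.set("b", "2")
cache.get("a")       # refreshes "a"; "b" is now least recently used
cache.set("c", "3")  # at capacity, so "b" is evicted
assert "a" in cache and "c" in cache and "b" not in cache
```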
{llama_deploy_appserver-0.3.0a23.dist-info → llama_deploy_appserver-0.3.1.dist-info}/METADATA
RENAMED
```diff
@@ -1,22 +1,23 @@
 Metadata-Version: 2.3
 Name: llama-deploy-appserver
-Version: 0.3.0a23
+Version: 0.3.1
 Summary: Application server components for LlamaDeploy
-Author: Massimiliano Pippi
-Author-email: Massimiliano Pippi <mpippi@gmail.com>
+Author: Massimiliano Pippi, Adrian Lyjak
+Author-email: Massimiliano Pippi <mpippi@gmail.com>, Adrian Lyjak <adrianlyjak@gmail.com>
 License: MIT
-Requires-Dist: llama-index-workflows[server]>=2.0
+Requires-Dist: llama-index-workflows[server]>=2.2.0
 Requires-Dist: pydantic-settings>=2.10.1
 Requires-Dist: uvicorn>=0.24.0
 Requires-Dist: fastapi>=0.100.0
 Requires-Dist: websockets>=12.0
-Requires-Dist: llama-deploy-core>=0.3.
+Requires-Dist: llama-deploy-core>=0.3.1,<0.4.0
 Requires-Dist: httpx>=0.24.0,<1.0.0
 Requires-Dist: prometheus-fastapi-instrumentator>=7.1.0
 Requires-Dist: packaging>=25.0
 Requires-Dist: structlog>=25.4.0
 Requires-Dist: rich>=14.1.0
 Requires-Dist: pyyaml>=6.0.2
+Requires-Dist: llama-cloud-services>=0.6.60
 Requires-Python: >=3.11, <4
 Description-Content-Type: text/markdown
 
```
{llama_deploy_appserver-0.3.0a23.dist-info → llama_deploy_appserver-0.3.1.dist-info}/RECORD
RENAMED
```diff
@@ -1,9 +1,9 @@
 llama_deploy/appserver/__init__.py,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
-llama_deploy/appserver/app.py,sha256=
+llama_deploy/appserver/app.py,sha256=1150ac9d6b2e4bd0bbe4cc70a16249381887e86f49c68492a3ab61024d4567b7,9520
 llama_deploy/appserver/bootstrap.py,sha256=fa32be007f18b4b3af92c878bac417416c9afb09b1beddf51b5cd73115e6b7c6,2453
 llama_deploy/appserver/configure_logging.py,sha256=194dd1ebed3c1d9065d9174f7828d557a577eaac8fb0443b3102430b1f578c19,6329
 llama_deploy/appserver/correlation_id.py,sha256=8ac5bc6160c707b93a9fb818b64dd369a4ef7a53f9f91a6b3d90c4cf446f7327,572
-llama_deploy/appserver/deployment.py,sha256=
+llama_deploy/appserver/deployment.py,sha256=c129ccc6e4c6899861c2e7d1e45ae77c5a9d48a9356f9cec4de923f58c7f293f,6306
 llama_deploy/appserver/deployment_config_parser.py,sha256=e2b6c483203d96ab795c4e55df15c694c20458d5a03fab89c2b71e481291a2d3,510
 llama_deploy/appserver/interrupts.py,sha256=14f262a0cedc00bb3aecd3d6c14c41ba0e88e7d2a6df02cd35b5bea1940822a2,1622
 llama_deploy/appserver/process_utils.py,sha256=befee4918c6cf72082dca8bf807afb61ad3d6c83f01bc0c007594b47930570d8,6056
@@ -12,10 +12,13 @@ llama_deploy/appserver/routers/__init__.py,sha256=ee2d14ebf4b067c844947ed1cc9818
 llama_deploy/appserver/routers/deployments.py,sha256=e7bafd72c1b4b809e5ad57442594a997c85ecab998b8430da65899faa910db1c,7572
 llama_deploy/appserver/routers/status.py,sha256=2af74bc40e52dc5944af2df98c6a021fea7b0cfcda88b56ac124dc383120758c,282
 llama_deploy/appserver/routers/ui_proxy.py,sha256=f63c36c201070594a4011320192d724b1c534d0ec655c49ba65c4e9911dbdd97,8633
-llama_deploy/appserver/settings.py,sha256=
+llama_deploy/appserver/settings.py,sha256=279dad9d80f4b54215cb8073bc46ee2beebfbc8ed75f40bccfbb387593f6975a,4984
 llama_deploy/appserver/stats.py,sha256=1f3989f6705a6de3e4d61ee8cdd189fbe04a2c53ec5e720b2e5168acc331427f,691
 llama_deploy/appserver/types.py,sha256=4edc991aafb6b8497f068d12387455df292da3ff8440223637641ab1632553ec,2133
 llama_deploy/appserver/workflow_loader.py,sha256=c15890a00976e022edcdf2af04bf699c02fba020bb06c47960a4911e08255501,14146
-
-
-
+llama_deploy/appserver/workflow_store/agent_data_store.py,sha256=7b8d1b8cb6f741ff631d668fc955ca76a82e8da0bf8a27ee3bc9a8ef71123701,3594
+llama_deploy/appserver/workflow_store/keyed_lock.py,sha256=bb1504d9de09d51a8f60721cc77b14d4051ac5a897ace6f9d9cba494f068465e,950
+llama_deploy/appserver/workflow_store/lru_cache.py,sha256=7511231b6aba81ea96044cf644cd9c1f11d78190b7b7f578b1b5a55e2c218f9f,1323
+llama_deploy_appserver-0.3.1.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
+llama_deploy_appserver-0.3.1.dist-info/METADATA,sha256=88e9cc386a7b625ebd34560670ac98573d41ad5f6ad379a228da9749ad41b1ef,974
+llama_deploy_appserver-0.3.1.dist-info/RECORD,,
```
{llama_deploy_appserver-0.3.0a23.dist-info → llama_deploy_appserver-0.3.1.dist-info}/WHEEL
RENAMED
File without changes