llama-deploy-appserver 0.3.10__tar.gz → 0.3.12__tar.gz
This diff compares the contents of two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/PKG-INFO +3 -3
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/pyproject.toml +3 -3
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/app.py +2 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/deployment.py +0 -1
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/routers/ui_proxy.py +2 -1
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/settings.py +4 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/workflow_store/agent_data_store.py +30 -14
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/README.md +0 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/__init__.py +0 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/bootstrap.py +0 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/configure_logging.py +0 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/correlation_id.py +0 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/deployment_config_parser.py +0 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/interrupts.py +0 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/process_utils.py +0 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/py.typed +0 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/routers/__init__.py +0 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/routers/deployments.py +0 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/routers/status.py +0 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/stats.py +0 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/types.py +0 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/workflow_loader.py +0 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/workflow_store/keyed_lock.py +0 -0
- {llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/workflow_store/lru_cache.py +0 -0
{llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/PKG-INFO
RENAMED

```diff
@@ -1,15 +1,15 @@
 Metadata-Version: 2.3
 Name: llama-deploy-appserver
-Version: 0.3.10
+Version: 0.3.12
 Summary: Application server components for LlamaDeploy
 Author: Massimiliano Pippi, Adrian Lyjak
 Author-email: Massimiliano Pippi <mpippi@gmail.com>, Adrian Lyjak <adrianlyjak@gmail.com>
 License: MIT
-Requires-Dist: llama-index-workflows[server]>=2.
+Requires-Dist: llama-index-workflows[server]>=2.6.0
 Requires-Dist: pydantic-settings>=2.10.1
 Requires-Dist: fastapi>=0.100.0
 Requires-Dist: websockets>=12.0
-Requires-Dist: llama-deploy-core>=0.3.
+Requires-Dist: llama-deploy-core>=0.3.12,<0.4.0
 Requires-Dist: httpx>=0.24.0,<1.0.0
 Requires-Dist: prometheus-fastapi-instrumentator>=7.1.0
 Requires-Dist: packaging>=25.0
```
{llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/pyproject.toml
RENAMED

```diff
@@ -1,6 +1,6 @@
 [project]
 name = "llama-deploy-appserver"
-version = "0.3.10"
+version = "0.3.12"
 description = "Application server components for LlamaDeploy"
 readme = "README.md"
 license = { text = "MIT" }
@@ -10,11 +10,11 @@ authors = [
 ]
 requires-python = ">=3.11, <4"
 dependencies = [
-    "llama-index-workflows[server]>=2.
+    "llama-index-workflows[server]>=2.6.0",
     "pydantic-settings>=2.10.1",
     "fastapi>=0.100.0",
     "websockets>=12.0",
-    "llama-deploy-core>=0.3.
+    "llama-deploy-core>=0.3.12,<0.4.0",
     "httpx>=0.24.0,<1.0.0",
     "prometheus-fastapi-instrumentator>=7.1.0",
     "packaging>=25.0",
```
{llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/app.py
RENAMED

```diff
@@ -236,6 +236,7 @@ def start_server_in_target_venv(
     persistence: Literal["memory", "local", "cloud"] | None = None,
     local_persistence_path: str | None = None,
     cloud_persistence_name: str | None = None,
+    host: str | None = None,
 ) -> None:
     # Ensure settings reflect the intended working directory before computing paths
 
@@ -247,6 +248,7 @@ def start_server_in_target_venv(
         persistence=persistence,
         local_persistence_path=local_persistence_path,
         cloud_persistence_name=cloud_persistence_name,
+        host=host,
     )
     base_dir = cwd or Path.cwd()
     path = settings.resolved_config_parent.relative_to(base_dir)
```
{llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/deployment.py
RENAMED

```diff
@@ -121,7 +121,6 @@ class Deployment:
         for route in server.app.routes:
             # add routes directly rather than mounting, so that we can share a root (only one ASGI app can be mounted at a path)
             if isinstance(route, Route):
-                logger.info(f"Adding route {route.path} to app")
                 app.add_api_route(
                     f"/deployments/{config.name}{route.path}",
                     route.endpoint,
```
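The surviving comment explains the design constraint: only one ASGI app can be mounted at a given path, so `Deployment` re-registers each workflow server route on the shared FastAPI app instead of mounting the sub-application. A minimal sketch of that pattern (the deployment name and endpoint are placeholders, not code from this package):

```python
# Sketch of the add-routes-instead-of-mount pattern described in the
# comment above; "example" and hello() are placeholders.
from fastapi import FastAPI

app = FastAPI()


def hello() -> dict:
    return {"ok": True}


# Re-register an existing endpoint under a deployment-scoped path on the
# shared app, rather than mounting a second ASGI app at /deployments/example.
app.add_api_route("/deployments/example/hello", hello, methods=["GET"])
```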
{llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/routers/ui_proxy.py
RENAMED

```diff
@@ -21,6 +21,7 @@ from llama_deploy.appserver.interrupts import (
     wait_or_abort,
 )
 from llama_deploy.appserver.settings import ApiserverSettings
+from llama_deploy.core.client.ssl_util import get_httpx_verify_param
 from llama_deploy.core.deployment_config import DeploymentConfig
 
 logger = logging.getLogger(__name__)
@@ -193,7 +194,7 @@ def create_ui_proxy_router(name: str, port: int) -> APIRouter:
         }
 
         try:
-            client = httpx.AsyncClient(timeout=None)
+            client = httpx.AsyncClient(timeout=None, verify=get_httpx_verify_param())
 
             req = client.build_request(
                 request.method,
```
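Both this router and `agent_data_store.py` (below) now construct their `httpx.AsyncClient` with `verify=get_httpx_verify_param()`, centralizing TLS-verification behavior in `llama_deploy.core.client.ssl_util`. That helper's body is not included in this diff; the sketch below only illustrates the kind of value `httpx` accepts for `verify`, and every environment variable name in it is an assumption:

```python
# Hypothetical sketch only: the real get_httpx_verify_param lives in
# llama_deploy.core.client.ssl_util and is not shown in this diff.
import os


def get_httpx_verify_param() -> bool | str:
    # httpx's `verify` accepts a CA-bundle path...
    ca_bundle = os.getenv("LLAMA_DEPLOY_CA_BUNDLE")  # assumed env var
    if ca_bundle:
        return ca_bundle
    # ...or False to disable certificate verification (assumed flag)...
    if os.getenv("LLAMA_DEPLOY_SKIP_TLS_VERIFY", "").lower() in ("1", "true"):
        return False
    # ...or True (the default) to verify against the system CA store.
    return True
```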
{llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/settings.py
RENAMED

```diff
@@ -103,6 +103,7 @@ def configure_settings(
     persistence: Literal["memory", "local", "cloud"] | None = None,
     local_persistence_path: str | None = None,
     cloud_persistence_name: str | None = None,
+    host: str | None = None,
 ) -> None:
     if proxy_ui is not None:
         settings.proxy_ui = proxy_ui
@@ -131,3 +132,6 @@ def configure_settings(
         os.environ["LLAMA_DEPLOY_APISERVER_CLOUD_PERSISTENCE_NAME"] = (
             cloud_persistence_name
         )
+    if host is not None:
+        settings.host = host
+        os.environ["LLAMA_DEPLOY_APISERVER_HOST"] = host
```
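`configure_settings` applies the new `host` both to the live `settings` object and to the `LLAMA_DEPLOY_APISERVER_HOST` environment variable, so the value survives into the server process re-launched by `start_server_in_target_venv` (see `app.py` above). A minimal sketch of why the env-var round-trip works, assuming `ApiserverSettings` is a pydantic-settings model whose env prefix is `LLAMA_DEPLOY_APISERVER_` (inferred from the variable names in this diff, not confirmed by it):

```python
# Minimal sketch; the real ApiserverSettings definition is not in this
# diff, and the env prefix and default below are inferred assumptions.
import os

from pydantic_settings import BaseSettings, SettingsConfigDict


class ApiserverSettingsSketch(BaseSettings):
    model_config = SettingsConfigDict(env_prefix="LLAMA_DEPLOY_APISERVER_")

    host: str = "127.0.0.1"  # assumed default


os.environ["LLAMA_DEPLOY_APISERVER_HOST"] = "0.0.0.0"
print(ApiserverSettingsSketch().host)  # "0.0.0.0", even in a fresh process
```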
{llama_deploy_appserver-0.3.10 → llama_deploy_appserver-0.3.12}/src/llama_deploy/appserver/workflow_store/agent_data_store.py
RENAMED

```diff
@@ -1,10 +1,12 @@
+import asyncio
 import logging
 import os
-from typing import List
+from typing import Any, List
 
 from llama_cloud.client import AsyncLlamaCloud, httpx
 from llama_cloud_services.beta.agent_data import AsyncAgentDataClient
 from llama_deploy.appserver.settings import ApiserverSettings
+from llama_deploy.core.client.ssl_util import get_httpx_verify_param
 from llama_deploy.core.deployment_config import DeploymentConfig
 from typing_extensions import override
 from workflows.server import AbstractWorkflowStore, HandlerQuery, PersistentHandler
@@ -40,6 +42,7 @@ class AgentDataStore(AbstractWorkflowStore):
                 token=os.getenv("LLAMA_CLOUD_API_KEY"),
                 httpx_client=httpx.AsyncClient(
                     headers={"Project-Id": project_id} if project_id else None,
+                    verify=get_httpx_verify_param(),
                 ),
             ),
         )
@@ -49,19 +52,7 @@ class AgentDataStore(AbstractWorkflowStore):
 
     @override
     async def query(self, query: HandlerQuery) -> List[PersistentHandler]:
-        filters = {}
-        if query.handler_id_in is not None:
-            filters["handler_id"] = {
-                "includes": query.handler_id_in,
-            }
-        if query.workflow_name_in is not None:
-            filters["workflow_name"] = {
-                "includes": query.workflow_name_in,
-            }
-        if query.status_in is not None:
-            filters["status"] = {
-                "includes": query.status_in,
-            }
+        filters = self._build_filters(query)
         results = await self.client.search(
             filter=filters,
             page_size=1000,
@@ -85,6 +76,15 @@ class AgentDataStore(AbstractWorkflowStore):
             data=handler,
         )
 
+    @override
+    async def delete(self, handler: HandlerQuery) -> int:
+        filters = self._build_filters(handler)
+        results = await self.client.search(filter=filters, page_size=1000)
+        await asyncio.gather(
+            *[self.client.delete_item(item_id=x.id) for x in results.items]
+        )
+        return len(results.items)
+
     async def _get_item_id(self, handler: PersistentHandler) -> str | None:
         cached_id = self.cache.get(handler.handler_id, None)
         if cached_id is not None:
@@ -98,3 +98,19 @@ class AgentDataStore(AbstractWorkflowStore):
         id = results.items[0].id
         self.cache.set(handler.handler_id, id)
         return id
+
+    def _build_filters(self, query: HandlerQuery) -> dict[str, Any]:
+        filters = {}
+        if query.handler_id_in is not None:
+            filters["handler_id"] = {
+                "includes": query.handler_id_in,
+            }
+        if query.workflow_name_in is not None:
+            filters["workflow_name"] = {
+                "includes": query.workflow_name_in,
+            }
+        if query.status_in is not None:
+            filters["status"] = {
+                "includes": query.status_in,
+            }
+        return filters
```
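The duplicated filter construction moves into `_build_filters`, and the new `delete` method searches with the same filters, deletes all matches concurrently, and returns the match count; since `search` is called with `page_size=1000`, a single `delete` call removes at most 1000 handlers. A short usage sketch (store construction and the status value are placeholders; the `HandlerQuery` keywords come from this file's imports):

```python
# Usage sketch only: building an AgentDataStore needs LlamaCloud
# credentials elided here, and "completed" is a placeholder status.
from workflows.server import HandlerQuery


async def purge_completed(store) -> int:
    # delete() filters exactly like query(), removes every match
    # concurrently via asyncio.gather, and reports how many it deleted.
    return await store.delete(HandlerQuery(status_in=["completed"]))
```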