llama-deploy-appserver 0.3.16__tar.gz → 0.3.18__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/PKG-INFO +3 -3
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/pyproject.toml +3 -3
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/deployment.py +3 -3
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/process_utils.py +22 -14
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/workflow_store/agent_data_store.py +2 -2
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/workflow_store/lru_cache.py +7 -5
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/README.md +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/__init__.py +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/app.py +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/bootstrap.py +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/configure_logging.py +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/correlation_id.py +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/deployment_config_parser.py +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/interrupts.py +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/py.typed +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/routers/__init__.py +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/routers/deployments.py +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/routers/status.py +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/routers/ui_proxy.py +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/settings.py +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/stats.py +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/types.py +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/workflow_loader.py +0 -0
- {llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/workflow_store/keyed_lock.py +0 -0
{llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/PKG-INFO

@@ -1,15 +1,15 @@
 Metadata-Version: 2.3
 Name: llama-deploy-appserver
-Version: 0.3.16
+Version: 0.3.18
 Summary: Application server components for LlamaDeploy
 Author: Massimiliano Pippi, Adrian Lyjak
 Author-email: Massimiliano Pippi <mpippi@gmail.com>, Adrian Lyjak <adrianlyjak@gmail.com>
 License: MIT
-Requires-Dist: llama-index-workflows[server]>=2.…
+Requires-Dist: llama-index-workflows[server]>=2.9.1
 Requires-Dist: pydantic-settings>=2.10.1
 Requires-Dist: fastapi>=0.100.0
 Requires-Dist: websockets>=12.0
-Requires-Dist: llama-deploy-core>=0.3.…
+Requires-Dist: llama-deploy-core>=0.3.18,<0.4.0
 Requires-Dist: httpx>=0.24.0,<1.0.0
 Requires-Dist: prometheus-fastapi-instrumentator>=7.1.0
 Requires-Dist: packaging>=25.0
{llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "llama-deploy-appserver"
-version = "0.3.16"
+version = "0.3.18"
 description = "Application server components for LlamaDeploy"
 readme = "README.md"
 license = { text = "MIT" }
@@ -10,11 +10,11 @@ authors = [
 ]
 requires-python = ">=3.11, <4"
 dependencies = [
-    "llama-index-workflows[server]>=2.…",
+    "llama-index-workflows[server]>=2.9.1",
     "pydantic-settings>=2.10.1",
     "fastapi>=0.100.0",
     "websockets>=12.0",
-    "llama-deploy-core>=0.3.…",
+    "llama-deploy-core>=0.3.18,<0.4.0",
     "httpx>=0.24.0,<1.0.0",
     "prometheus-fastapi-instrumentator>=7.1.0",
     "packaging>=25.0",
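Both metadata files tighten the llama-deploy-core requirement to an explicit `>=0.3.18,<0.4.0` window, so the appserver now tracks the core package's 0.3 line exactly. A quick illustration of how such a specifier resolves, using the `packaging` library that already appears in the dependency list (the probed versions are arbitrary examples):

from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet(">=0.3.18,<0.4.0")
assert Version("0.3.18") in spec       # the matching core release
assert Version("0.3.16") not in spec   # older core releases are now rejected
assert Version("0.4.0") not in spec    # the next minor line is excluded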
{llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/deployment.py

@@ -19,7 +19,7 @@ from starlette.routing import Route
 from workflows import Context, Workflow
 from workflows.handler import WorkflowHandler
 from workflows.server import SqliteWorkflowStore, WorkflowServer
-from workflows.server.…
+from workflows.server.memory_workflow_store import MemoryWorkflowStore

 logger = logging.getLogger()

@@ -95,7 +95,7 @@ class Deployment:
     def create_workflow_server(
         self, deployment_config: DeploymentConfig, settings: ApiserverSettings
     ) -> WorkflowServer:
-        persistence = …
+        persistence = MemoryWorkflowStore()
         if settings.persistence == "local":
             logger.info("Using local sqlite persistence for workflows")
             persistence = SqliteWorkflowStore(
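The hunk above changes the default persistence backend: `persistence` now starts as a `MemoryWorkflowStore()` and is only replaced by the sqlite store when `settings.persistence == "local"` (the `SqliteWorkflowStore(...)` constructor arguments continue past the end of the hunk). A self-contained sketch of the fallback pattern, with stand-in classes so it runs without the package:

# MemoryStore/SqliteStore are stand-ins, not the real workflow store classes.
class MemoryStore: ...
class SqliteStore: ...

def choose_store(persistence: str | None) -> MemoryStore | SqliteStore:
    store: MemoryStore | SqliteStore = MemoryStore()  # default: ephemeral, per-process
    if persistence == "local":
        store = SqliteStore()  # opt-in: survives appserver restarts
    return store

assert isinstance(choose_store(None), MemoryStore)
assert isinstance(choose_store("local"), SqliteStore)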
@@ -125,7 +125,7 @@ class Deployment:
             f"/deployments/{config.name}{route.path}",
             route.endpoint,
             name=f"{config.name}_{route.name}",
-            methods=route.methods,
+            methods=list(route.methods) if route.methods else None,
             include_in_schema=True,  # change to false when schemas are added to workflow server
             tags=["workflows"],
         )
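Starlette stores `Route.methods` as an optional `set[str]`, while the registration call here is typed against an optional list, so the `methods=` change converts explicitly and collapses an empty or missing set to `None`. The conversion in isolation:

def normalize_methods(methods: set[str] | None) -> list[str] | None:
    # list(...) satisfies list-typed signatures; a falsy set collapses to None
    return list(methods) if methods else None

assert sorted(normalize_methods({"GET", "HEAD"})) == ["GET", "HEAD"]
assert normalize_methods(None) is None
assert normalize_methods(set()) is None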
{llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/process_utils.py

@@ -4,7 +4,8 @@ import platform
 import subprocess
 import sys
 import threading
-from …
+from dataclasses import dataclass
+from typing import Callable, TextIO, Tuple, cast


 def run_process(
@@ -25,13 +26,13 @@ def run_process(
     use_pty = _should_use_pty(use_tty)
     prefixer = _make_prefixer(prefix, color_code, line_transform)

-    process, sources, cleanup = _spawn_process(cmd, cwd=cwd, env=env, use_pty=use_pty)
+    spawned = _spawn_process(cmd, cwd=cwd, env=env, use_pty=use_pty)
     threads: list[threading.Thread] = []
     try:
-        cleanup()
+        spawned.cleanup()
         _log_command(cmd, prefixer)
-        threads = _start_stream_threads(sources, prefixer)
-        ret = process.wait()
+        threads = _start_stream_threads(spawned.sources, prefixer)
+        ret = spawned.process.wait()
         if ret != 0:
             raise subprocess.CalledProcessError(ret, cmd)
     finally:
@@ -56,11 +57,11 @@ def spawn_process(
     use_pty = _should_use_pty(use_tty)
     prefixer = _make_prefixer(prefix, color_code, line_transform)

-    process, sources, cleanup = _spawn_process(cmd, cwd=cwd, env=env, use_pty=use_pty)
-    cleanup()
+    spawned = _spawn_process(cmd, cwd=cwd, env=env, use_pty=use_pty)
+    spawned.cleanup()
     _log_command(cmd, prefixer)
-    _start_stream_threads(sources, prefixer)
-    return process
+    _start_stream_threads(spawned.sources, prefixer)
+    return spawned.process


 @functools.cache
@@ -109,13 +110,20 @@ def should_use_color() -> bool:
     return _should_use_pty(None)


+@dataclass
+class SpawnProcessResult:
+    process: subprocess.Popen
+    sources: list[Tuple[int | TextIO, TextIO]]
+    cleanup: Callable[[], None]
+
+
 def _spawn_process(
     cmd: list[str],
     *,
     cwd: os.PathLike | None,
     env: dict[str, str] | None,
     use_pty: bool,
-) -> …
+) -> SpawnProcessResult:
     if use_pty:
         import pty

@@ -139,7 +147,7 @@ def _spawn_process(
         sources: list[tuple[int | TextIO, TextIO]] = [
            (master_fd, cast(TextIO, sys.stdout)),
         ]
-        return process, sources, cleanup
+        return SpawnProcessResult(process, sources, cleanup)

     use_shell = False
     if platform.system() == "Windows":
@@ -161,10 +169,10 @@ def _spawn_process(

     assert process.stdout is not None and process.stderr is not None
     sources = [
-        (process.stdout, cast(TextIO, sys.stdout)),
-        (process.stderr, cast(TextIO, sys.stderr)),
+        (cast(int | TextIO, process.stdout), cast(TextIO, sys.stdout)),
+        (cast(int | TextIO, process.stderr), cast(TextIO, sys.stderr)),
     ]
-    return process, sources, cleanup
+    return SpawnProcessResult(process, sources, cleanup)


 def _stream_source(
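The common thread in these hunks is a single refactor: `_spawn_process` previously returned a `(process, sources, cleanup)` tuple that each caller unpacked positionally, and it now returns a `SpawnProcessResult` dataclass accessed by field name. The added `cast(int | TextIO, ...)` calls serve the same goal: pipe objects and raw pty file descriptors now share one `sources` list uniformly typed `list[Tuple[int | TextIO, TextIO]]`. A runnable sketch of the tuple-to-dataclass pattern with simplified fields (`SpawnResult` and its no-op `cleanup` are stand-ins, not the package's class):

import subprocess
import sys
from dataclasses import dataclass
from typing import Callable

@dataclass
class SpawnResult:
    # Named fields replace positional tuple unpacking at call sites.
    process: subprocess.Popen
    cleanup: Callable[[], None]

def spawn(cmd: list[str]) -> SpawnResult:
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # The real cleanup closes parent-side pty fds; a no-op stands in here.
    return SpawnResult(process=process, cleanup=lambda: None)

spawned = spawn([sys.executable, "-c", "print('ok')"])
spawned.cleanup()
assert spawned.process.wait() == 0  # spawned.process instead of tuple index 0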
{llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/workflow_store/agent_data_store.py

@@ -81,12 +81,12 @@ class AgentDataStore(AbstractWorkflowStore):
         filters = self._build_filters(handler)
         results = await self.client.search(filter=filters, page_size=1000)
         await asyncio.gather(
-            *[self.client.delete_item(item_id=x.id) for x in results.items]
+            *[self.client.delete_item(item_id=x.id) for x in results.items if x.id]
         )
         return len(results.items)

     async def _get_item_id(self, handler: PersistentHandler) -> str | None:
-        cached_id = self.cache.get(handler.handler_id…
+        cached_id = self.cache.get(handler.handler_id)
         if cached_id is not None:
             return cached_id
         results = await self.client.search(
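The bulk delete now filters out items whose `id` is unset before fanning the deletions out through `asyncio.gather`, so the client is never asked to delete an item that has no id. A self-contained illustration of the guarded fan-out (`Item` and `FakeClient` are stand-ins, not package types):

import asyncio
from dataclasses import dataclass

@dataclass
class Item:
    id: str | None

class FakeClient:
    def __init__(self) -> None:
        self.deleted: list[str] = []

    async def delete_item(self, item_id: str) -> None:
        self.deleted.append(item_id)

async def main() -> None:
    client = FakeClient()
    items = [Item("a"), Item(None), Item("b")]
    # `if x.id` mirrors the hunk: entries without an id are skipped, not sent.
    await asyncio.gather(*[client.delete_item(item_id=x.id) for x in items if x.id])
    assert client.deleted == ["a", "b"]

asyncio.run(main())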
{llama_deploy_appserver-0.3.16 → llama_deploy_appserver-0.3.18}/src/llama_deploy/appserver/workflow_store/lru_cache.py

@@ -19,10 +19,7 @@ class LRUCache(Generic[K, V]):
     def get(self, key: K, default: V | None = None) -> V | None:
         if key not in self._store:
             return default
-
-        value = self._store.pop(key)
-        self._store[key] = value
-        return value
+        return self[key]

     def set(self, key: K, value: V):
         if key in self._store:
@@ -37,7 +34,12 @@ class LRUCache(Generic[K, V]):
         return key in self._store

     def __getitem__(self, key: K) -> V:
-        …
+        # mark as recently used
+        if key not in self._store:
+            raise KeyError(key)
+        value = self._store.pop(key)
+        self._store[key] = value
+        return value

     def __setitem__(self, key: K, value: V):
         self.set(key, value)
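The recency-bump logic (`pop` then re-insert, exploiting dict insertion order) moves from `get` into `__getitem__`, so both access paths now mark an entry as most recently used, and `__getitem__` raises `KeyError` for missing keys as the mapping protocol expects. A compact sketch of the whole pattern (the eviction rule in `set` is an assumption; the diff only shows the first line of the real method):

class LRU:
    """Minimal LRU sketch mirroring the diff's pattern (not the package class)."""

    def __init__(self, max_size: int) -> None:
        self._store: dict[str, int] = {}  # dicts preserve insertion order
        self._max_size = max_size

    def __getitem__(self, key: str) -> int:
        if key not in self._store:
            raise KeyError(key)
        value = self._store.pop(key)  # pop + re-insert = move to MRU end
        self._store[key] = value
        return value

    def get(self, key: str, default: int | None = None) -> int | None:
        # Delegate so the recency bump lives in exactly one place.
        return self[key] if key in self._store else default

    def set(self, key: str, value: int) -> None:
        if key in self._store:
            self._store.pop(key)
        elif len(self._store) >= self._max_size:
            self._store.pop(next(iter(self._store)))  # evict LRU (oldest key)
        self._store[key] = value

cache = LRU(max_size=2)
cache.set("a", 1)
cache.set("b", 2)
cache.get("a")     # bumps "a"; "b" becomes least recently used
cache.set("c", 3)  # evicts "b"
assert cache.get("b") is None and cache.get("a") == 1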