ai-pipeline-core 0.2.9__py3-none-any.whl → 0.3.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ai_pipeline_core/__init__.py +32 -5
- ai_pipeline_core/debug/__init__.py +26 -0
- ai_pipeline_core/debug/config.py +91 -0
- ai_pipeline_core/debug/content.py +705 -0
- ai_pipeline_core/debug/processor.py +99 -0
- ai_pipeline_core/debug/summary.py +236 -0
- ai_pipeline_core/debug/writer.py +913 -0
- ai_pipeline_core/deployment/__init__.py +46 -0
- ai_pipeline_core/deployment/base.py +681 -0
- ai_pipeline_core/deployment/contract.py +84 -0
- ai_pipeline_core/deployment/helpers.py +98 -0
- ai_pipeline_core/documents/flow_document.py +1 -1
- ai_pipeline_core/documents/task_document.py +1 -1
- ai_pipeline_core/documents/temporary_document.py +1 -1
- ai_pipeline_core/flow/config.py +13 -2
- ai_pipeline_core/flow/options.py +4 -4
- ai_pipeline_core/images/__init__.py +362 -0
- ai_pipeline_core/images/_processing.py +157 -0
- ai_pipeline_core/llm/ai_messages.py +25 -4
- ai_pipeline_core/llm/client.py +15 -19
- ai_pipeline_core/llm/model_response.py +5 -5
- ai_pipeline_core/llm/model_types.py +10 -13
- ai_pipeline_core/logging/logging_mixin.py +2 -2
- ai_pipeline_core/pipeline.py +1 -1
- ai_pipeline_core/progress.py +127 -0
- ai_pipeline_core/prompt_builder/__init__.py +5 -0
- ai_pipeline_core/prompt_builder/documents_prompt.jinja2 +23 -0
- ai_pipeline_core/prompt_builder/global_cache.py +78 -0
- ai_pipeline_core/prompt_builder/new_core_documents_prompt.jinja2 +6 -0
- ai_pipeline_core/prompt_builder/prompt_builder.py +253 -0
- ai_pipeline_core/prompt_builder/system_prompt.jinja2 +41 -0
- ai_pipeline_core/tracing.py +54 -2
- ai_pipeline_core/utils/deploy.py +214 -6
- ai_pipeline_core/utils/remote_deployment.py +37 -187
- {ai_pipeline_core-0.2.9.dist-info → ai_pipeline_core-0.3.3.dist-info}/METADATA +96 -27
- ai_pipeline_core-0.3.3.dist-info/RECORD +57 -0
- {ai_pipeline_core-0.2.9.dist-info → ai_pipeline_core-0.3.3.dist-info}/WHEEL +1 -1
- ai_pipeline_core/simple_runner/__init__.py +0 -14
- ai_pipeline_core/simple_runner/cli.py +0 -254
- ai_pipeline_core/simple_runner/simple_runner.py +0 -247
- ai_pipeline_core-0.2.9.dist-info/RECORD +0 -41
- {ai_pipeline_core-0.2.9.dist-info → ai_pipeline_core-0.3.3.dist-info}/licenses/LICENSE +0 -0
ai_pipeline_core/utils/deploy.py
CHANGED
@@ -18,10 +18,13 @@ Usage:
 
 import argparse
 import asyncio
+import json
 import subprocess
 import sys
+import tempfile
 import tomllib
 import traceback
+from datetime import datetime, timezone
 from pathlib import Path
 from typing import Any, Optional
 
@@ -70,6 +73,8 @@ class Deployer:
         with open(pyproject_path, "rb") as f:
             data = tomllib.load(f)
 
+        self._pyproject_data = data
+
         project = data.get("project", {})
         name = project.get("name")
         version = project.get("version")
@@ -160,6 +165,192 @@ class Deployer:
         self._success(f"Built {tarball_path.name} ({tarball_path.stat().st_size // 1024} KB)")
         return tarball_path
 
+    # -- Agent build/upload support --
+
+    def _load_agent_config(self) -> dict[str, dict[str, Any]]:
+        """Load [tool.deploy.agents] from pyproject.toml.
+
+        Returns:
+            Dict mapping agent name to config (path, extra_vendor).
+            Empty dict if no agents configured.
+        """
+        return self._pyproject_data.get("tool", {}).get("deploy", {}).get("agents", {})
+
+    def _get_cli_agents_source(self) -> str | None:
+        """Get cli_agents_source path from [tool.deploy]."""
+        return self._pyproject_data.get("tool", {}).get("deploy", {}).get("cli_agents_source")
+
+    def _build_wheel_from_source(self, source_dir: Path) -> Path:
+        """Build a wheel from a source directory.
+
+        Args:
+            source_dir: Directory containing pyproject.toml
+
+        Returns:
+            Path to built .whl file in a temp dist directory
+        """
+        if not (source_dir / "pyproject.toml").exists():
+            self._die(f"No pyproject.toml in {source_dir}")
+
+        with tempfile.TemporaryDirectory() as tmpdir:
+            tmp_dist = Path(tmpdir) / "dist"
+            result = subprocess.run(
+                [sys.executable, "-m", "build", "--wheel", "--outdir", str(tmp_dist)],
+                cwd=source_dir,
+                capture_output=True,
+                text=True,
+            )
+            if result.returncode != 0:
+                self._die(f"Wheel build failed for {source_dir.name}:\n{result.stderr}")
+
+            wheels = list(tmp_dist.glob("*.whl"))
+            if not wheels:
+                self._die(f"No wheel produced for {source_dir.name}")
+
+            # Copy to persistent dist/ under source_dir
+            dist_dir = source_dir / "dist"
+            dist_dir.mkdir(exist_ok=True)
+            output = dist_dir / wheels[0].name
+            output.write_bytes(wheels[0].read_bytes())
+            return output
+
+    def _build_agents(self) -> dict[str, dict[str, Any]]:
+        """Build agent wheels and manifests for all configured agents.
+
+        Returns:
+            Dict mapping agent name to build info:
+            {name: {"manifest_json": str, "files": {filename: Path}}}
+            Empty dict if no agents configured.
+        """
+        agent_config = self._load_agent_config()
+        if not agent_config:
+            return {}
+
+        cli_agents_source = self._get_cli_agents_source()
+        if not cli_agents_source:
+            self._die(
+                "Agents configured in [tool.deploy.agents] but "
+                "[tool.deploy].cli_agents_source is not set.\n"
+                "Add to pyproject.toml:\n"
+                '  [tool.deploy]\n  cli_agents_source = "vendor/cli-agents"'
+            )
+
+        self._info(f"Building {len(agent_config)} agent(s): {', '.join(agent_config)}")
+
+        # Build cli-agents wheel once (shared across all agents)
+        cli_agents_dir = Path(cli_agents_source).resolve()
+        if not (cli_agents_dir / "pyproject.toml").exists():
+            self._die(f"cli-agents source not found at {cli_agents_dir}")
+
+        cli_agents_wheel = self._build_wheel_from_source(cli_agents_dir)
+        self._success(f"Built cli-agents wheel: {cli_agents_wheel.name}")
+
+        builds: dict[str, dict[str, Any]] = {}
+
+        for agent_name, config in agent_config.items():
+            agent_path = Path(config["path"]).resolve()
+            if not (agent_path / "pyproject.toml").exists():
+                self._die(
+                    f"Agent '{agent_name}' path not found: {agent_path}\n"
+                    f"Check [tool.deploy.agents.{agent_name}].path in pyproject.toml"
+                )
+
+            # Read module_name from agent's pyproject.toml
+            with open(agent_path / "pyproject.toml", "rb") as f:
+                agent_pyproject = tomllib.load(f)
+
+            module_name = agent_pyproject.get("tool", {}).get("agent", {}).get("module")
+            if not module_name:
+                self._die(
+                    f"Agent '{agent_name}' missing [tool.agent].module in "
+                    f"{agent_path / 'pyproject.toml'}\n"
+                    f'Add:\n  [tool.agent]\n  module = "agent_{agent_name}"'
+                )
+
+            # Build agent wheel
+            agent_wheel = self._build_wheel_from_source(agent_path)
+            self._success(f"Built agent wheel: {agent_wheel.name}")
+
+            # Collect all files for this agent bundle
+            files: dict[str, Path] = {
+                agent_wheel.name: agent_wheel,
+                cli_agents_wheel.name: cli_agents_wheel,
+            }
+
+            # Build extra_vendor packages from repo root
+            vendor_packages: list[str] = []
+            extra_built: set[str] = set()
+            for vendor_name in config.get("extra_vendor", []):
+                extra_source_dir = Path(vendor_name).resolve()
+                if not (extra_source_dir / "pyproject.toml").exists():
+                    self._die(
+                        f"Extra vendor '{vendor_name}' for agent '{agent_name}' "
+                        f"not found at {extra_source_dir}\n"
+                        f"Ensure the directory exists at repo root with pyproject.toml"
+                    )
+                vendor_wheel = self._build_wheel_from_source(extra_source_dir)
+                files[vendor_wheel.name] = vendor_wheel
+                vendor_packages.append(vendor_wheel.name)
+                extra_built.add(extra_source_dir.name.replace("-", "_"))
+                self._success(f"Built vendor wheel: {vendor_wheel.name}")
+
+            # Collect existing vendor/*.whl and vendor/*.tar.gz from agent directory,
+            # skipping packages already built from extra_vendor
+            agent_vendor_dir = agent_path / "vendor"
+            if agent_vendor_dir.exists():
+                for pkg in list(agent_vendor_dir.glob("*.whl")) + list(
+                    agent_vendor_dir.glob("*.tar.gz")
+                ):
+                    pkg_base = pkg.name.split("-")[0].replace("-", "_")
+                    if pkg.name not in files and pkg_base not in extra_built:
+                        files[pkg.name] = pkg
+                        vendor_packages.append(pkg.name)
+
+            # Write manifest (plain JSON dict, compatible with AgentManifest schema)
+            manifest = {
+                "module_name": module_name,
+                "agent_wheel": agent_wheel.name,
+                "cli_agents_wheel": cli_agents_wheel.name,
+                "vendor_packages": vendor_packages,
+                "built_at": datetime.now(timezone.utc).isoformat(),
+            }
+            manifest_json = json.dumps(manifest, indent=2)
+
+            builds[agent_name] = {"manifest_json": manifest_json, "files": files}
+            self._success(f"Agent '{agent_name}' bundle ready ({module_name}, {len(files)} files)")
+
+        return builds
+
+    async def _upload_agents(self, agent_builds: dict[str, dict[str, Any]]):
+        """Upload agent bundles to GCS.
+
+        Args:
+            agent_builds: Output from _build_agents()
+        """
+        if not agent_builds:
+            return
+
+        flow_folder = self.config["folder"].split("/", 1)[1] if "/" in self.config["folder"] else ""
+        base_uri = f"gs://{self.config['bucket']}/flows"
+        base_storage = await Storage.from_uri(base_uri)
+        base_storage = base_storage.with_base(flow_folder)
+
+        for agent_name, build_info in agent_builds.items():
+            agent_storage = base_storage.with_base(f"agents/{agent_name}")
+            self._info(f"Uploading agent '{agent_name}' bundle to {agent_storage.url_for('')}")
+
+            # Upload manifest
+            await agent_storage.write_bytes(
+                "manifest.json",
+                build_info["manifest_json"].encode(),
+            )
+
+            # Upload wheels
+            for filename, filepath in build_info["files"].items():
+                await agent_storage.write_bytes(filename, filepath.read_bytes())
+
+            self._success(f"Agent '{agent_name}' uploaded ({len(build_info['files'])} files)")
+
     async def _upload_package(self, tarball: Path):
         """Upload package tarball to Google Cloud Storage using Storage abstraction.
 
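The agent bundling above is driven entirely by pyproject.toml. As a rough sketch of the layout these methods read (package and path names here are illustrative, not taken from the diff):

    # pyproject.toml of the flow project -- names are hypothetical
    [tool.deploy]
    cli_agents_source = "vendor/cli-agents"

    [tool.deploy.agents.researcher]
    path = "agents/researcher"
    extra_vendor = ["shared-utils"]

    # pyproject.toml of the agent package itself
    [tool.agent]
    module = "agent_researcher"

_build_agents builds one wheel per configured agent plus a single shared cli-agents wheel, and exits with the hints shown above when either table is missing.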
@@ -184,13 +375,17 @@ class Deployer:
 
         self._success(f"Package uploaded to {self.config['folder']}/{tarball.name}")
 
-    async def _deploy_via_api(self):
+    async def _deploy_via_api(self, agent_builds: dict[str, dict[str, Any]] | None = None):
         """Create or update Prefect deployment using RunnerDeployment pattern.
 
         This is the official Prefect approach that:
         1. Automatically creates/updates the flow registration
         2. Handles deployment create vs update logic
         3. Properly formats all parameters for the API
+
+        Args:
+            agent_builds: Output from _build_agents(). If non-empty, sets
+                AGENT_BUNDLES_URI env var on the deployment.
         """
         # Define entrypoint (assumes flow function has same name as package)
         entrypoint = f"{self.config['package']}:{self.config['package']}"
@@ -244,6 +439,13 @@ class Deployer:
         # This is the official Prefect pattern that handles all the complexity
         self._info(f"Creating deployment for flow '{flow.name}'")
 
+        # Set AGENT_BUNDLES_URI env var if agents were built
+        job_variables: dict[str, Any] = {}
+        if agent_builds:
+            bundles_uri = f"gs://{self.config['bucket']}/{self.config['folder']}/agents"
+            job_variables["env"] = {"AGENT_BUNDLES_URI": bundles_uri}
+            self._info(f"Setting AGENT_BUNDLES_URI={bundles_uri}")
+
         deployment = RunnerDeployment(
             name=self.config["package"],
             flow_name=flow.name,
@@ -256,7 +458,7 @@ class Deployer:
             or f"Deployment for {self.config['package']} v{self.config['version']}",
             storage=_PullStepStorage(pull_steps),
             parameters={},
-            job_variables=
+            job_variables=job_variables,
             paused=False,
         )
 
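When agent bundles exist, the deployment's job variables carry a single environment variable pointing at them. For illustration, with bucket "my-bucket" and folder "flows/research" (hypothetical values), _deploy_via_api would pass:

    job_variables = {
        "env": {"AGENT_BUNDLES_URI": "gs://my-bucket/flows/research/agents"}
    }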
@@ -296,14 +498,20 @@ class Deployer:
         print("=" * 70)
         print()
 
-        # Phase 1: Build
+        # Phase 1: Build flow package
         tarball = self._build_package()
 
-        # Phase 2:
+        # Phase 2: Build agent bundles (if configured)
+        agent_builds = self._build_agents()
+
+        # Phase 3: Upload flow package
         await self._upload_package(tarball)
 
-        # Phase
-        await self.
+        # Phase 4: Upload agent bundles
+        await self._upload_agents(agent_builds)
+
+        # Phase 5: Create/update Prefect deployment
+        await self._deploy_via_api(agent_builds)
 
         print()
         print("=" * 70)
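Each bundle uploaded in Phase 4 contains a manifest.json built from the dict shown in _build_agents. A sketch with hypothetical wheel names:

    {
      "module_name": "agent_researcher",
      "agent_wheel": "agent_researcher-0.1.0-py3-none-any.whl",
      "cli_agents_wheel": "cli_agents-0.4.0-py3-none-any.whl",
      "vendor_packages": ["shared_utils-0.2.0-py3-none-any.whl"],
      "built_at": "2025-01-01T00:00:00+00:00"
    }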
ai_pipeline_core/utils/remote_deployment.py
CHANGED
@@ -1,12 +1,8 @@
-"""
-
-EXPERIMENTAL: This module provides utilities for calling remotely deployed Prefect flows.
-Subject to change in future versions.
-"""
+"""@public Remote deployment utilities for calling PipelineDeployment flows via Prefect."""
 
 import inspect
 from functools import wraps
-from typing import Any, Callable, ParamSpec,
+from typing import Any, Callable, ParamSpec, TypeVar, cast
 
 from prefect import get_client
 from prefect.client.orchestration import PrefectClient
@@ -15,85 +11,26 @@ from prefect.context import AsyncClientContext
 from prefect.deployments.flow_runs import run_deployment
 from prefect.exceptions import ObjectNotFound
 
-from ai_pipeline_core import
+from ai_pipeline_core.deployment import DeploymentContext, DeploymentResult, PipelineDeployment
+from ai_pipeline_core.flow.options import FlowOptions
 from ai_pipeline_core.settings import settings
 from ai_pipeline_core.tracing import TraceLevel, set_trace_cost, trace
 
-
-
-
-
-
-def _callable_name(obj: Any, fallback: str) -> str:
-    """Safely extract callable's name for error messages.
-
-    Args:
-        obj: Any object that might have a __name__ attribute.
-        fallback: Default name if extraction fails.
-
-    Returns:
-        The callable's __name__ if available, fallback otherwise.
-
-    Note:
-        Internal helper that never raises exceptions.
-    """
-    try:
-        n = getattr(obj, "__name__", None)
-        return n if isinstance(n, str) else fallback
-    except Exception:
-        return fallback
+P = ParamSpec("P")
+TOptions = TypeVar("TOptions", bound=FlowOptions)
+TResult = TypeVar("TResult", bound=DeploymentResult)
 
 
 def _is_already_traced(func: Callable[..., Any]) -> bool:
-    """Check if
-
-    This checks both for the explicit __is_traced__ marker and walks
-    the __wrapped__ chain to detect nested trace decorations.
-
-    Args:
-        func: Function to check for existing trace decoration.
-
-    Returns:
-        True if the function is already traced, False otherwise.
-    """
-    # Check for explicit marker
-    if hasattr(func, "__is_traced__") and func.__is_traced__:  # type: ignore[attr-defined]
+    """Check if function or its __wrapped__ has __is_traced__ attribute."""
+    if getattr(func, "__is_traced__", False):
         return True
-
-
-    current = func
-    depth = 0
-    max_depth = 10  # Prevent infinite loops
-
-    while hasattr(current, "__wrapped__") and depth < max_depth:
-        wrapped = current.__wrapped__  # type: ignore[attr-defined]
-        # Check if the wrapped function has the trace marker
-        if hasattr(wrapped, "__is_traced__") and wrapped.__is_traced__:  # type: ignore[attr-defined]
-            return True
-        current = wrapped
-        depth += 1
-
-    return False
-
-
-# --------------------------------------------------------------------------- #
-# Remote deployment execution
-# --------------------------------------------------------------------------- #
+    wrapped = getattr(func, "__wrapped__", None)
+    return getattr(wrapped, "__is_traced__", False) if wrapped else False
 
 
 async def run_remote_deployment(deployment_name: str, parameters: dict[str, Any]) -> Any:
-    """Run a remote Prefect deployment.
-
-    Args:
-        deployment_name: Name of the deployment to run.
-        parameters: Parameters to pass to the deployment.
-
-    Returns:
-        Result from the deployment execution.
-
-    Raises:
-        ValueError: If deployment is not found in local or remote Prefect API.
-    """
+    """Run a remote Prefect deployment, trying local client first then remote."""
 
     async def _run(client: PrefectClient, as_subflow: bool) -> Any:
         fr: FlowRun = await run_deployment(
@@ -109,7 +46,7 @@ async def run_remote_deployment(deployment_name: str, parameters: dict[str, Any]
             pass
 
     if not settings.prefect_api_url:
-        raise ValueError(f"{deployment_name}
+        raise ValueError(f"{deployment_name} not found, PREFECT_API_URL not set")
 
     async with PrefectClient(
         api=settings.prefect_api_url,
@@ -118,9 +55,10 @@ async def run_remote_deployment(deployment_name: str, parameters: dict[str, Any]
     ) as client:
         try:
            await client.read_deployment_by_name(name=deployment_name)
-
+            ctx = AsyncClientContext.model_construct(
                 client=client, _httpx_settings=None, _context_stack=0
-            )
+            )
+            with ctx:
                 return await _run(client, False)
         except ObjectNotFound:
             pass
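run_remote_deployment resolves the deployment against the local client first and only falls back to PREFECT_API_URL if that lookup fails. A minimal direct call might look like this (deployment and parameter names are hypothetical):

    result = await run_remote_deployment(
        "my-pipeline/my-pipeline",
        {"project_name": "demo", "context": DeploymentContext()},
    )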
@@ -128,142 +66,54 @@ async def run_remote_deployment(deployment_name: str, parameters: dict[str, Any]
     raise ValueError(f"{deployment_name} deployment not found")
 
 
-P = ParamSpec("P")
-T = TypeVar("T")
-
-
 def remote_deployment(
-
+    deployment_class: type[PipelineDeployment[TOptions, TResult]],
     *,
-
+    deployment_name: str | None = None,
     name: str | None = None,
     trace_level: TraceLevel = "always",
-    trace_ignore_input: bool = False,
-    trace_ignore_output: bool = False,
-    trace_ignore_inputs: list[str] | None = None,
-    trace_input_formatter: Callable[..., str] | None = None,
-    trace_output_formatter: Callable[..., str] | None = None,
     trace_cost: float | None = None,
-
-
-    """Decorator for calling remote Prefect deployments with automatic tracing.
-
-    EXPERIMENTAL: Decorator for calling remote Prefect deployments with automatic
-    parameter serialization, result deserialization, and LMNR tracing.
-
-    IMPORTANT: Never combine with @trace decorator - this includes tracing automatically.
-    The framework will raise TypeError if you try to use both decorators together.
-
-    Best Practice - Use Defaults:
-        For most use cases, only specify output_document_type. The defaults provide
-        automatic tracing with optimal settings.
-
-    Args:
-        output_document_type: The FlowDocument type to deserialize results into.
-        name: Custom trace name (defaults to function name).
-        trace_level: When to trace ("always", "debug", "off").
-            - "always": Always trace (default)
-            - "debug": Only trace when LMNR_DEBUG="true"
-            - "off": Disable tracing
-        trace_ignore_input: Don't trace input arguments.
-        trace_ignore_output: Don't trace return value.
-        trace_ignore_inputs: List of parameter names to exclude from tracing.
-        trace_input_formatter: Custom formatter for input tracing.
-        trace_output_formatter: Custom formatter for output tracing.
-        trace_cost: Optional cost value to track in metadata. When provided and > 0,
-            sets gen_ai.usage.output_cost, gen_ai.usage.cost, and cost metadata.
-        trace_trim_documents: Trim document content in traces to first 100 chars (default True).
-            Reduces trace size with large documents.
+) -> Callable[[Callable[P, TResult]], Callable[P, TResult]]:
+    """@public Decorator to call PipelineDeployment flows remotely with automatic serialization."""
 
-
-
+    def decorator(func: Callable[P, TResult]) -> Callable[P, TResult]:
+        fname = getattr(func, "__name__", deployment_class.name)
 
-    Example:
-        >>> # RECOMMENDED - Minimal usage
-        >>> @remote_deployment(output_document_type=OutputDoc)
-        >>> async def process_remotely(
-        ...     project_name: str,
-        ...     documents: DocumentList,
-        ...     flow_options: FlowOptions
-        >>> ) -> DocumentList:
-        ...     pass  # This stub is replaced by remote call
-        >>>
-        >>> # With custom tracing
-        >>> @remote_deployment(
-        ...     output_document_type=OutputDoc,
-        ...     trace_cost=0.05,  # Track cost of remote execution
-        ...     trace_level="debug"  # Only trace in debug mode
-        >>> )
-        >>> async def debug_remote_flow(...) -> DocumentList:
-        ...     pass
-
-    Note:
-        - Remote calls are automatically traced with LMNR
-        - The decorated function's body is never executed - it serves as a signature template
-        - Deployment name is auto-derived from function name
-        - DocumentList parameters are automatically serialized/deserialized
-
-    Raises:
-        TypeError: If function is already decorated with @trace.
-        ValueError: If deployment is not found.
-    """
-
-    def decorator(func: Callable[P, T]) -> Callable[P, T]:
-        fname = _callable_name(func, "remote_deployment")
-
-        # Check if function is already traced
         if _is_already_traced(func):
-            raise TypeError(
-                f"@remote_deployment target '{fname}' is already decorated "
-                f"with @trace. Remove the @trace decorator - @remote_deployment includes "
-                f"tracing automatically."
-            )
+            raise TypeError(f"@remote_deployment target '{fname}' already has @trace")
 
         @wraps(func)
-        async def _wrapper(*args: P.args, **kwargs: P.kwargs) ->
+        async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> TResult:
             sig = inspect.signature(func)
             bound = sig.bind(*args, **kwargs)
             bound.apply_defaults()
 
-            #
-            parameters = {}
+            # Pass parameters with proper types - Prefect handles Pydantic serialization
+            parameters: dict[str, Any] = {}
             for pname, value in bound.arguments.items():
-                if
-                    parameters[pname] =
+                if value is None and pname == "context":
+                    parameters[pname] = DeploymentContext()
                 else:
                     parameters[pname] = value
 
-
-            deployment_name = f"{func.__name__.replace('_', '-')}/{func.__name__}"
+            full_name = f"{deployment_class.name}/{deployment_name or deployment_class.name}"
 
-            result = await run_remote_deployment(
-                deployment_name=deployment_name, parameters=parameters
-            )
+            result = await run_remote_deployment(full_name, parameters)
 
-            # Set trace cost if provided
             if trace_cost is not None and trace_cost > 0:
                 set_trace_cost(trace_cost)
 
-
-
-
-
-
-            assert return_type is DocumentList, "Return type must be a DocumentList"
-            return DocumentList([output_document_type(**item) for item in result])  # type: ignore
+            if isinstance(result, DeploymentResult):
+                return cast(TResult, result)
+            if isinstance(result, dict):
+                return cast(TResult, deployment_class.result_type(**result))
+            raise TypeError(f"Expected DeploymentResult, got {type(result).__name__}")
 
-        # Apply trace decorator
         traced_wrapper = trace(
             level=trace_level,
-            name=name or
-            ignore_input=trace_ignore_input,
-            ignore_output=trace_ignore_output,
-            ignore_inputs=trace_ignore_inputs,
-            input_formatter=trace_input_formatter,
-            output_formatter=trace_output_formatter,
-            trim_documents=trace_trim_documents,
+            name=name or deployment_class.name,
         )(_wrapper)
 
-        return traced_wrapper  # type: ignore
+        return traced_wrapper  # type: ignore[return-value]
 
     return decorator