toposync-ext-ai 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- toposync_ext_ai-0.1.0/.gitignore +39 -0
- toposync_ext_ai-0.1.0/LICENSE +21 -0
- toposync_ext_ai-0.1.0/PKG-INFO +25 -0
- toposync_ext_ai-0.1.0/README.md +12 -0
- toposync_ext_ai-0.1.0/pyproject.toml +24 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/__init__.py +5 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/catalog.py +44 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/constants.py +7 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/extension.json +13 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/pipelines/__init__.py +5 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/pipelines/image_utils.py +104 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/pipelines/operators.py +111 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/pipelines/runtime.py +829 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/pipelines/schemas.py +97 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/plugin.py +210 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/providers.py +655 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/router.py +351 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/settings.py +205 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/static/326.js +2 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/static/326.js.LICENSE.txt +9 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/static/387.js +1 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/static/754.js +2 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/static/754.js.LICENSE.txt +9 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/static/main.js +1 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/static/remoteEntry.js +1 -0
- toposync_ext_ai-0.1.0/src/toposync_ext_ai/usage.py +137 -0
- toposync_ext_ai-0.1.0/ui/package.json +22 -0
- toposync_ext_ai-0.1.0/ui/src/activate.tsx +12 -0
- toposync_ext_ai-0.1.0/ui/src/api/aiApi.ts +85 -0
- toposync_ext_ai-0.1.0/ui/src/constants.ts +5 -0
- toposync_ext_ai-0.1.0/ui/src/entry.ts +1 -0
- toposync_ext_ai-0.1.0/ui/src/operators/AiOperatorPanels.tsx +419 -0
- toposync_ext_ai-0.1.0/ui/src/settings/AiSettingsPanel.tsx +1036 -0
- toposync_ext_ai-0.1.0/ui/src/translations.ts +210 -0
- toposync_ext_ai-0.1.0/ui/src/types.ts +95 -0
- toposync_ext_ai-0.1.0/ui/tsconfig.json +14 -0
- toposync_ext_ai-0.1.0/ui/webpack.config.js +42 -0
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
.DS_Store
|
|
2
|
+
.env
|
|
3
|
+
.venv
|
|
4
|
+
__pycache__/
|
|
5
|
+
*.pyc
|
|
6
|
+
.pytest_cache/
|
|
7
|
+
.ruff_cache/
|
|
8
|
+
|
|
9
|
+
node_modules/
|
|
10
|
+
yarn.lock
|
|
11
|
+
.pnp.cjs
|
|
12
|
+
.pnp.loader.mjs
|
|
13
|
+
.yarn/
|
|
14
|
+
dist/
|
|
15
|
+
build/
|
|
16
|
+
.parcel-cache/
|
|
17
|
+
|
|
18
|
+
.toposync-data/
|
|
19
|
+
.toposync-processor-data
|
|
20
|
+
toposync-data/
|
|
21
|
+
|
|
22
|
+
*.log
|
|
23
|
+
|
|
24
|
+
*.ignore.md
|
|
25
|
+
ignore/
|
|
26
|
+
|
|
27
|
+
# Playwright
|
|
28
|
+
test-results/
|
|
29
|
+
playwright-report/
|
|
30
|
+
yolo*.pt
|
|
31
|
+
|
|
32
|
+
# Streaming extension engines are downloaded at runtime.
|
|
33
|
+
extensions/streaming/src/toposync_ext_streaming/bin/mediamtx/**/mediamtx*
|
|
34
|
+
extensions/streaming/src/toposync_ext_streaming/bin/ffmpeg/**/ffmpeg*
|
|
35
|
+
|
|
36
|
+
# Vision model artifacts are provisioned locally and must not be committed.
|
|
37
|
+
extensions/vision/models/
|
|
38
|
+
|
|
39
|
+
*.temporary.*
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Mateus Calza
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: toposync-ext-ai
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Toposync first-party extension: AI-assisted image operators and provider routing.
|
|
5
|
+
License-Expression: MIT
|
|
6
|
+
License-File: LICENSE
|
|
7
|
+
Requires-Python: >=3.11
|
|
8
|
+
Requires-Dist: httpx>=0.27
|
|
9
|
+
Requires-Dist: litellm<2,>=1.74
|
|
10
|
+
Requires-Dist: pillow<13,>=10
|
|
11
|
+
Requires-Dist: toposync-core>=0.3.6
|
|
12
|
+
Description-Content-Type: text/markdown
|
|
13
|
+
|
|
14
|
+
# Toposync AI Extension
|
|
15
|
+
|
|
16
|
+
First-party extension for AI-assisted pipeline operators.
|
|
17
|
+
|
|
18
|
+
This first phase focuses on image workflows:
|
|
19
|
+
|
|
20
|
+
- `ai.smart_crop`: locates an object/region from a natural-language description and crops the frame.
|
|
21
|
+
- `ai.condition_filter`: evaluates a natural-language visual condition and emits packets only when it matches.
|
|
22
|
+
|
|
23
|
+
The extension is designed around local-first usage. Ollama is the default provider target, with
|
|
24
|
+
`qwen3-vl:30b` as the initial high-quality local vision recommendation. Cloud providers can be
|
|
25
|
+
added through provider profiles and explicit fallback chains.
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
# Toposync AI Extension
|
|
2
|
+
|
|
3
|
+
First-party extension for AI-assisted pipeline operators.
|
|
4
|
+
|
|
5
|
+
This first phase focuses on image workflows:
|
|
6
|
+
|
|
7
|
+
- `ai.smart_crop`: locates an object/region from a natural-language description and crops the frame.
|
|
8
|
+
- `ai.condition_filter`: evaluates a natural-language visual condition and emits packets only when it matches.
|
|
9
|
+
|
|
10
|
+
The extension is designed around local-first usage. Ollama is the default provider target, with
|
|
11
|
+
`qwen3-vl:30b` as the initial high-quality local vision recommendation. Cloud providers can be
|
|
12
|
+
added through provider profiles and explicit fallback chains.
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "toposync-ext-ai"
|
|
3
|
+
version = "0.1.0"
|
|
4
|
+
description = "Toposync first-party extension: AI-assisted image operators and provider routing."
|
|
5
|
+
readme = "README.md"
|
|
6
|
+
license = "MIT"
|
|
7
|
+
license-files = ["LICENSE"]
|
|
8
|
+
requires-python = ">=3.11"
|
|
9
|
+
dependencies = [
|
|
10
|
+
"toposync-core>=0.3.6",
|
|
11
|
+
"httpx>=0.27",
|
|
12
|
+
"litellm>=1.74,<2",
|
|
13
|
+
"pillow>=10,<13",
|
|
14
|
+
]
|
|
15
|
+
|
|
16
|
+
[project.entry-points."toposync.extensions"]
|
|
17
|
+
ai = "toposync_ext_ai.plugin:AiExtension"
|
|
18
|
+
|
|
19
|
+
[build-system]
|
|
20
|
+
requires = ["hatchling>=1.25"]
|
|
21
|
+
build-backend = "hatchling.build"
|
|
22
|
+
|
|
23
|
+
[tool.hatch.build.targets.wheel]
|
|
24
|
+
packages = ["src/toposync_ext_ai"]
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from copy import deepcopy
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
from .constants import DEFAULT_OLLAMA_MODEL
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
# Static catalog of vetted local vision models surfaced to the UI/settings layer.
# Each entry pairs provider/model identifiers with display metadata (name,
# estimated download size, recommendation tier) plus the task/capability tags
# the router uses for matching.  Callers get deep copies through
# list_builtin_model_catalog(), so this module-level list is never mutated.
BUILTIN_MODEL_CATALOG: list[dict[str, Any]] = [
    {
        # Primary local recommendation; model id comes from constants so the
        # default stays in one place (DEFAULT_OLLAMA_MODEL).
        "id": "ollama_qwen3_vl_30b",
        "provider": "ollama",
        "model": DEFAULT_OLLAMA_MODEL,
        "name": "Qwen3-VL 30B",
        "recommendation": "best_local_quality",
        "tasks": ["image_region", "image_condition"],
        "capabilities": ["vision", "structured_json", "bbox", "boolean_filter"],
        "input_modalities": ["text", "image"],
        "local": True,
        "estimated_size": "20GB",
        "min_ollama_version": "0.12.7",
        "last_verified_at": "2026-05-02",
        "notes": "Initial high-quality local recommendation for image reasoning through Ollama.",
    },
    {
        # Smaller fallback for machines that cannot host the 30B variant.
        "id": "ollama_qwen3_vl_8b",
        "provider": "ollama",
        "model": "qwen3-vl:8b",
        "name": "Qwen3-VL 8B",
        "recommendation": "lighter_local",
        "tasks": ["image_region", "image_condition"],
        "capabilities": ["vision", "structured_json", "bbox", "boolean_filter"],
        "input_modalities": ["text", "image"],
        "local": True,
        "estimated_size": "6.1GB",
        "min_ollama_version": "0.12.7",
        "last_verified_at": "2026-05-02",
        "notes": "Lighter local fallback when the 30B variant is too heavy for the machine.",
    },
]
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def list_builtin_model_catalog() -> list[dict[str, Any]]:
    """Return a defensive deep copy of the built-in model catalog.

    Entries are copied per item so callers can freely mutate the result
    without affecting the module-level catalog.
    """
    return [deepcopy(entry) for entry in BUILTIN_MODEL_CATALOG]
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
{
|
|
2
|
+
"schema_version": 1,
|
|
3
|
+
"id": "com.toposync.ai",
|
|
4
|
+
"name": "AI",
|
|
5
|
+
"version": "0.1.0",
|
|
6
|
+
"requires_core_version": ">=0.3.6",
|
|
7
|
+
"frontend": {
|
|
8
|
+
"kind": "module-federation",
|
|
9
|
+
"remote_entry": "remoteEntry.js",
|
|
10
|
+
"scope": "ai",
|
|
11
|
+
"module": "./activate"
|
|
12
|
+
}
|
|
13
|
+
}
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def image_size(image: Any) -> tuple[int, int] | None:
|
|
7
|
+
shape = getattr(image, "shape", None)
|
|
8
|
+
if shape and len(shape) >= 2:
|
|
9
|
+
try:
|
|
10
|
+
return int(shape[1]), int(shape[0])
|
|
11
|
+
except Exception:
|
|
12
|
+
return None
|
|
13
|
+
size = getattr(image, "size", None)
|
|
14
|
+
if isinstance(size, tuple) and len(size) >= 2:
|
|
15
|
+
try:
|
|
16
|
+
return int(size[0]), int(size[1])
|
|
17
|
+
except Exception:
|
|
18
|
+
return None
|
|
19
|
+
return None
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def normalize_bbox01(value: Any) -> tuple[float, float, float, float] | None:
|
|
23
|
+
if not isinstance(value, (list, tuple)) or len(value) < 4:
|
|
24
|
+
return None
|
|
25
|
+
try:
|
|
26
|
+
x1, y1, x2, y2 = [float(value[i]) for i in range(4)]
|
|
27
|
+
except Exception:
|
|
28
|
+
return None
|
|
29
|
+
x1 = max(0.0, min(1.0, x1))
|
|
30
|
+
y1 = max(0.0, min(1.0, y1))
|
|
31
|
+
x2 = max(0.0, min(1.0, x2))
|
|
32
|
+
y2 = max(0.0, min(1.0, y2))
|
|
33
|
+
if x2 <= x1 or y2 <= y1:
|
|
34
|
+
return None
|
|
35
|
+
return x1, y1, x2, y2
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def expand_bbox01(
    bbox01: tuple[float, float, float, float],
    *,
    padding_ratio: float,
) -> tuple[float, float, float, float]:
    """Grow a unit-square box symmetrically by ``padding_ratio`` of its size.

    A falsy or non-positive ratio returns the input box unchanged.  The
    expanded box is clamped back into [0, 1] on every side.
    """
    left, top, right, bottom = bbox01
    ratio = max(0.0, float(padding_ratio or 0.0))
    if ratio <= 0:
        return bbox01
    pad_x = max(0.0, right - left) * ratio
    pad_y = max(0.0, bottom - top) * ratio
    return (
        max(0.0, left - pad_x),
        max(0.0, top - pad_y),
        min(1.0, right + pad_x),
        min(1.0, bottom + pad_y),
    )
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def bbox01_to_px(
    bbox01: tuple[float, float, float, float],
    *,
    width: int,
    height: int,
) -> tuple[int, int, int, int]:
    """Convert a normalized [0, 1] box to integer pixel bounds.

    The result is clamped to the image and always spans at least 1px in each
    dimension: ``left < right <= width`` and ``top < bottom <= height``
    (for images at least 1px on a side).

    Fix: left/top are now clamped to ``width - 1`` / ``height - 1`` before
    the 1px minimum is enforced.  Previously a box hugging the far edge
    (e.g. x1 close enough to 1.0 to round to ``width``) produced
    ``right == width + 1`` — a region outside the image.
    """
    x1, y1, x2, y2 = bbox01
    left = int(round(max(0.0, min(1.0, x1)) * width))
    top = int(round(max(0.0, min(1.0, y1)) * height))
    right = int(round(max(0.0, min(1.0, x2)) * width))
    bottom = int(round(max(0.0, min(1.0, y2)) * height))
    # Keep the near edge strictly inside the image so the 1px minimum below
    # cannot push the far edge past the image boundary.
    left = min(left, max(0, width - 1))
    top = min(top, max(0, height - 1))
    right = max(left + 1, min(width, right))
    bottom = max(top + 1, min(height, bottom))
    return left, top, right, bottom
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def crop_bbox01(
    image: Any,
    *,
    bbox01: tuple[float, float, float, float],
    min_crop_size_px: int,
) -> Any | None:
    """Crop ``image`` to a normalized box, supporting PIL and numpy inputs.

    Returns ``None`` when the image size cannot be determined, the image is
    degenerate (<= 1px on a side), the crop would be smaller than
    ``min_crop_size_px`` in either dimension, or slicing fails.
    """
    dims = image_size(image)
    if dims is None:
        return None
    width, height = dims
    if min(width, height) <= 1:
        return None

    left, top, right, bottom = bbox01_to_px(bbox01, width=width, height=height)
    minimum = int(min_crop_size_px)
    if (right - left) < minimum or (bottom - top) < minimum:
        return None

    # PIL-style images expose crop(); prefer it when available.
    pil_crop = getattr(image, "crop", None)
    if callable(pil_crop):
        return pil_crop((left, top, right, bottom))

    # Fall back to numpy-style slicing; copy so the crop owns its memory.
    try:
        view = image[top:bottom, left:right]
    except Exception:
        return None
    copier = getattr(view, "copy", None)
    if not callable(copier):
        return view
    try:
        return copier()
    except Exception:
        return view
|
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any
|
|
4
|
+
|
|
5
|
+
from toposync.runtime.pipelines.operator_registry import OperatorRegistry, payload_path_hint
|
|
6
|
+
|
|
7
|
+
from toposync_ext_ai.constants import EXTENSION_ID
|
|
8
|
+
|
|
9
|
+
from .runtime import AiConditionFilterRuntime, AiSmartCropRuntime
|
|
10
|
+
from .schemas import AiConditionFilterConfig, AiSmartCropConfig
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def _ai_expression_hints(*, task: str) -> list[Any]:
    """Build expression-editor payload-path hints for one AI operator task."""
    hints: list[Any] = [
        payload_path_hint("payload.ai", value_type="object", description="AI annotations attached to the packet."),
    ]
    if task == "smart_crop":
        smart_crop_specs = [
            ("payload.ai.smart_crop", "object", "AI smart-crop result."),
            ("payload.ai.smart_crop.status", "string", "Smart-crop status."),
            ("payload.ai.smart_crop.confidence", "number", "AI region confidence."),
            ("payload.ai.smart_crop.bbox01", "array", "Detected normalized region."),
            ("payload.ai.smart_crop.detections", "array", "All AI detections used by the smart crop."),
            ("payload.ai.smart_crop.selected_detection", "object", "Detection selected by the crop strategy."),
            ("payload.object_bbox01", "array", "Primary AI-detected bbox."),
            ("payload.object_confidence", "number", "Primary AI-detected confidence."),
            ("payload.object_category_label", "string", "Primary AI target label."),
            ("payload.frame_crop", "object", "Applied frame crop metadata."),
        ]
        hints.extend(
            payload_path_hint(path, value_type=kind, description=text)
            for path, kind, text in smart_crop_specs
        )
    if task == "condition_filter":
        condition_specs = [
            ("payload.ai.condition_filter", "object", "AI condition-filter result."),
            ("payload.ai.condition_filter.matches", "boolean", "Whether the AI condition matched."),
            ("payload.ai.condition_filter.confidence", "number", "AI condition confidence."),
        ]
        hints.extend(
            payload_path_hint(path, value_type=kind, description=text)
            for path, kind, text in condition_specs
        )
    return hints
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def register_ai_pipeline_operators(registry: OperatorRegistry) -> None:
    """Register the AI image operators, skipping ids the registry already has."""

    def _shared_kwargs() -> dict[str, Any]:
        # Fresh containers on every call so both registrations pass the
        # registry their own lists, exactly as the inline literals did.
        return {
            "inputs": [{"name": "in", "required": True}],
            "outputs": [{"name": "out"}],
            "execution_mode": "in_event_loop",
            "max_concurrency": 1,
            "requires_artifacts": ["frame_original"],
            "input_modalities": ["image"],
            "output_modalities": ["image"],
            "share_strategy": "never",
            "owner": EXTENSION_ID,
        }

    if registry.get("ai.smart_crop") is None:
        registry.register_operator(
            operator_id="ai.smart_crop",
            description=(
                "AI-guided image crop. Locates a region from a text description, emits a crop artifact, "
                "and exposes object_bbox01/object_confidence for downstream camera and filter operators."
            ),
            config_model=AiSmartCropConfig,
            capabilities=["ai", "vision", "crop", "heavy_compute"],
            defaults=AiSmartCropConfig().model_dump(),
            produces_payload_keys=[
                "ai",
                "object_bbox01",
                "object_confidence",
                "object_category_label",
                "detected_object",
                "detected_objects",
                "frame_crop",
            ],
            produces_artifacts=["ai_crop"],
            expression_hints=_ai_expression_hints(task="smart_crop"),
            runtime_factory=lambda config, deps: AiSmartCropRuntime(config, deps),
            **_shared_kwargs(),
        )

    if registry.get("ai.condition_filter") is None:
        registry.register_operator(
            operator_id="ai.condition_filter",
            description=(
                "AI visual condition filter. Evaluates a text condition against the frame and emits only "
                "matching packets, attaching boolean/confidence metadata."
            ),
            config_model=AiConditionFilterConfig,
            capabilities=["ai", "vision", "filter", "heavy_compute"],
            defaults=AiConditionFilterConfig().model_dump(),
            produces_payload_keys=["ai"],
            expression_hints=_ai_expression_hints(task="condition_filter"),
            runtime_factory=lambda config, deps: AiConditionFilterRuntime(config, deps),
            **_shared_kwargs(),
        )