holoscan-3.5.0-cp313-cp313-manylinux_2_35_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- holoscan-3.5.0.data/purelib/holoscan/__init__.py +135 -0
- holoscan-3.5.0.data/purelib/holoscan/cli/__main__.py +26 -0
- holoscan-3.5.0.data/purelib/holoscan/conditions/__init__.py +75 -0
- holoscan-3.5.0.data/purelib/holoscan/conditions/_conditions.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/core/__init__.py +793 -0
- holoscan-3.5.0.data/purelib/holoscan/core/_core.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/data_loggers/__init__.py +76 -0
- holoscan-3.5.0.data/purelib/holoscan/data_loggers/async_console_logger/__init__.py +22 -0
- holoscan-3.5.0.data/purelib/holoscan/data_loggers/async_console_logger/_async_console_logger.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/data_loggers/basic_console_logger/__init__.py +22 -0
- holoscan-3.5.0.data/purelib/holoscan/data_loggers/basic_console_logger/_basic_console_logger.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/decorator.py +662 -0
- holoscan-3.5.0.data/purelib/holoscan/executors/__init__.py +26 -0
- holoscan-3.5.0.data/purelib/holoscan/executors/_executors.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/graphs/__init__.py +32 -0
- holoscan-3.5.0.data/purelib/holoscan/graphs/_graphs.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/gxf/__init__.py +60 -0
- holoscan-3.5.0.data/purelib/holoscan/gxf/_gxf.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/gxf_extensions/libgxf_holoscan_wrapper.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/gxf_extensions/libgxf_holoscan_wrapper_lib.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/gxf_extensions/libgxf_ucx_holoscan.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/gxf_extensions/libgxf_ucx_holoscan_lib.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libgxf_app.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libgxf_core.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libgxf_cuda.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libgxf_http.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libgxf_logger.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libgxf_multimedia.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libgxf_rmm.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libgxf_sample.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libgxf_serialization.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libgxf_std.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libgxf_ucx.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_core.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_data_loggers_async_console_logger.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_data_loggers_basic_console_logger.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_infer.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_infer_onnx_runtime.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_infer_torch.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_infer_utils.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_logger.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_op_async_ping_rx.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_op_async_ping_tx.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_op_bayer_demosaic.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_op_format_converter.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_op_gxf_codelet.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_op_holoviz.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_op_inference.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_op_inference_processor.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_op_ping_rx.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_op_ping_tensor_rx.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_op_ping_tensor_tx.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_op_ping_tx.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_op_segmentation_postprocessor.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_op_v4l2.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_op_video_stream_recorder.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_op_video_stream_replayer.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_pose_tree.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_profiler.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_spdlog_logger.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libholoscan_viz.so.3.5.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libucm.so.0.0.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libucp.so.0.0.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libucs.so.0.0.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libucs_signal.so.0.0.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libuct.so.0.0.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/libucxx.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/ucx/libucm_cuda.so.0.0.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/ucx/libucs_fuse.so.0.0.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/ucx/libuct_cma.so.0.0.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/ucx/libuct_cuda.so.0.0.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/ucx/libuct_cuda_gdrcopy.so.0.0.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/ucx/libuct_ib.so.0.0.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/ucx/libuct_rdmacm.so.0.0.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/ucx/libuct_xpmem.so.0.0.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/lib/ucx/libucx_perftest_cuda.so.0.0.0 +0 -0
- holoscan-3.5.0.data/purelib/holoscan/logger/__init__.py +37 -0
- holoscan-3.5.0.data/purelib/holoscan/logger/_logger.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/network_contexts/__init__.py +28 -0
- holoscan-3.5.0.data/purelib/holoscan/network_contexts/_network_contexts.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/__init__.py +95 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/bayer_demosaic/__init__.py +24 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/bayer_demosaic/_bayer_demosaic.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/format_converter/__init__.py +23 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/format_converter/_format_converter.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/gxf_codelet/__init__.py +67 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/gxf_codelet/_gxf_codelet.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/holoviz/__init__.py +423 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/holoviz/_holoviz.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/inference/__init__.py +28 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/inference/_inference.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/inference_processor/__init__.py +23 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/inference_processor/_inference_processor.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/ping_rx/__init__.py +45 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/ping_tensor_rx/__init__.py +22 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/ping_tensor_rx/_ping_tensor_rx.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/ping_tensor_tx/__init__.py +22 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/ping_tensor_tx/_ping_tensor_tx.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/ping_tx/__init__.py +46 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/segmentation_postprocessor/__init__.py +23 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/segmentation_postprocessor/_segmentation_postprocessor.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/v4l2_video_capture/__init__.py +23 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/v4l2_video_capture/_v4l2_video_capture.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/video_stream_recorder/__init__.py +22 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/video_stream_recorder/_video_stream_recorder.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/video_stream_replayer/__init__.py +22 -0
- holoscan-3.5.0.data/purelib/holoscan/operators/video_stream_replayer/_video_stream_replayer.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/pose_tree/__init__.py +271 -0
- holoscan-3.5.0.data/purelib/holoscan/pose_tree/_pose_tree.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/resources/__init__.py +162 -0
- holoscan-3.5.0.data/purelib/holoscan/resources/_resources.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan/schedulers/__init__.py +32 -0
- holoscan-3.5.0.data/purelib/holoscan/schedulers/_schedulers.cpython-313-aarch64-linux-gnu.so +0 -0
- holoscan-3.5.0.data/purelib/holoscan-3.5.0.pth +1 -0
- holoscan-3.5.0.dist-info/LICENSE.txt +202 -0
- holoscan-3.5.0.dist-info/METADATA +123 -0
- holoscan-3.5.0.dist-info/NOTICE.txt +187 -0
- holoscan-3.5.0.dist-info/NVIDIA-AI-PRODUCT-EULA.txt +243 -0
- holoscan-3.5.0.dist-info/README.md +35 -0
- holoscan-3.5.0.dist-info/RECORD +125 -0
- holoscan-3.5.0.dist-info/WHEEL +5 -0
- holoscan-3.5.0.dist-info/axle.lck +0 -0
- holoscan-3.5.0.dist-info/entry_points.txt +3 -0
- holoscan-3.5.0.dist-info/symlinks.txt +84 -0
- holoscan-3.5.0.dist-info/top_level.txt +2 -0
holoscan-3.5.0.data/purelib/holoscan/core/__init__.py
@@ -0,0 +1,793 @@
# SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a Python API for the core C++ API classes.

The `Application` class is the primary class that should be derived from to
create a custom application.

.. autosummary::

    holoscan.core.Application
    holoscan.core.Arg
    holoscan.core.ArgContainerType
    holoscan.core.ArgElementType
    holoscan.core.ArgList
    holoscan.core.ArgType
    holoscan.core.AsyncDataLoggerResource
    holoscan.core.AsyncQueuePolicy
    holoscan.core.CLIOptions
    holoscan.core.Component
    holoscan.core.ComponentSpec
    holoscan.core.Condition
    holoscan.core.ConditionType
    holoscan.core.Config
    holoscan.core.DataFlowMetric
    holoscan.core.DataFlowTracker
    holoscan.core.DataLogger
    holoscan.core.DataLoggerResource
    holoscan.core.DefaultFragmentService
    holoscan.core.DLDevice
    holoscan.core.DLDeviceType
    holoscan.core.ExecutionContext
    holoscan.core.Executor
    holoscan.core.Fragment
    holoscan.core.FlowInfo
    holoscan.core.Fragment
    holoscan.core.Graph
    holoscan.core.FragmentService
    holoscan.core.InputContext
    holoscan.core.IOSpec
    holoscan.core.Message
    holoscan.core.MetadataDictionary
    holoscan.core.MetadataPolicy
    holoscan.core.MultiMessageConditionInfo
    holoscan.core.NetworkContext
    holoscan.core.Operator
    holoscan.core.OperatorSpec
    holoscan.core.OperatorStatus
    holoscan.core.OutputContext
    holoscan.core.ParameterFlag
    holoscan.core.arg_to_py_object
    holoscan.core.arglist_to_kwargs
    holoscan.core.Resource
    holoscan.core.SchedulingStatusType
    holoscan.core.ServiceDriverEndpoint
    holoscan.core.ServiceWorkerEndpoint
    holoscan.core.Tensor
    holoscan.core.Tracker
    holoscan.core.kwargs_to_arglist
    holoscan.core.py_object_to_arg
"""

import logging
import sys

# Note: Python 3.7+ expects the threading module to be initialized (imported) before additional
# threads are created (by C++ modules using pybind11).
# Otherwise you will get an assert tlock.locked() error on exit.
# (CLARAHOLOS-765)
import threading as _threading  # noqa: F401, I001

# Add ThreadPoolExecutor to imports if not already there
from concurrent.futures import ThreadPoolExecutor

# Import statements for the C++ API classes
from ..graphs._graphs import FragmentGraph, OperatorGraph
from ._core import Application as _Application
from ._core import (
    Arg,
    ArgContainerType,
    ArgElementType,
    ArgList,
    ArgType,
    AsyncDataLoggerResource,
    AsyncQueuePolicy,
    CLIOptions,
    Component,
    ConditionType,
    Config,
    DataFlowMetric,
    DataFlowTracker,
    DataLogger,
    DataLoggerResource,
    DistributedAppService,
    DLDevice,
    DLDeviceType,
    Executor,
    FlowInfo,
    FragmentService,
    IOSpec,
    Message,
    MetadataDictionary,
    MetadataPolicy,
    MultiMessageConditionInfo,
    NetworkContext,
    OperatorStatus,
    ParameterFlag,
    Scheduler,
    SchedulingStatusType,
    ServiceDriverEndpoint,
    ServiceWorkerEndpoint,
    arg_to_py_object,
    arglist_to_kwargs,
    kwargs_to_arglist,
    py_object_to_arg,
)
from ._core import Condition as _Condition
from ._core import DefaultFragmentService as _DefaultFragmentService
from ._core import Fragment as _Fragment
from ._core import Operator as _Operator
from ._core import PyComponentSpec as ComponentSpec
from ._core import PyExecutionContext as ExecutionContext
from ._core import PyInputContext as InputContext
from ._core import PyOperatorSpec as OperatorSpec
from ._core import PyOutputContext as OutputContext
from ._core import PyRegistryContext as _RegistryContext
from ._core import PyTensor as Tensor
from ._core import Resource as _Resource
from ._core import register_types as _register_types

# Get a logger instance for this module
logger = logging.getLogger(__name__)

Graph = OperatorGraph  # define alias for backward compatibility

__all__ = [
    "Application",
    "Arg",
    "ArgContainerType",
    "ArgElementType",
    "ArgList",
    "ArgType",
    "AsyncDataLoggerResource",
    "AsyncQueuePolicy",
    "CLIOptions",
    "Component",
    "ComponentSpec",
    "ConditionType",
    "Condition",
    "Config",
    "DataFlowMetric",
    "DataFlowTracker",
    "DataLogger",
    "DataLoggerResource",
    "DefaultFragmentService",
    "DistributedAppService",
    "DLDevice",
    "DLDeviceType",
    "ExecutionContext",
    "Executor",
    "FlowInfo",
    "Fragment",
    "FragmentGraph",
    "FragmentService",
    "Graph",
    "InputContext",
    "IOSpec",
    "Message",
    "MetadataDictionary",
    "MetadataPolicy",
    "MultiMessageConditionInfo",
    "NetworkContext",
    "Operator",
    "OperatorSpec",
    "OperatorStatus",
    "OperatorGraph",
    "OutputContext",
    "ParameterFlag",
    "Resource",
    "Scheduler",
    "SchedulingStatusType",
    "ServiceDriverEndpoint",
    "ServiceWorkerEndpoint",
    "START_OPERATOR_NAME",
    "Tensor",
    "Tracker",
    "arg_to_py_object",
    "arglist_to_kwargs",
    "io_type_registry",
    "kwargs_to_arglist",
    "py_object_to_arg",
]


# Define custom __repr__ method for MetadataDictionary
def metadata_repr(self):
    items = {k: v for k, v in self.items()}
    return f"{items}"


# need these imports for ThreadPool return type of Fragment.make_thread_pool to work
from ..gxf._gxf import GXFResource as _GXFResource  # noqa: E402, F401, I001
from ..resources import ThreadPool as _ThreadPool  # noqa: E402, F401, I001

MetadataDictionary.__repr__ = metadata_repr

# Defines the special operator name used to initiate application execution.
# The GXF framework requires entity names to not begin with double underscores,
# so this distinctive name pattern is chosen to prevent naming collisions.
# This constant mirrors the C++ definition of `holoscan::kStartOperatorName`
# found in holoscan/core/fragment.hpp
START_OPERATOR_NAME = "<|start|>"


class Application(_Application):
    def __init__(self, argv=None, *args, **kwargs):
        # If no arguments are provided, instead of letting the C++ API initialize the application
        # from the command line (through '/proc/self/cmdline'), we initialize the application
        # with the command line arguments retrieved from the Python interpreter.
        # This is because the C++ API will not be able to discard arguments that are not meant for
        # the Python application.
        # For example, if the user runs the application with the following
        # command line arguments:
        #   /usr/bin/python3 -m pytest -v -k test_init /workspace/holoscan-sdk/public/python/tests
        # then the C++ API will get the following arguments:
        #   ['/usr/bin/python3', '-m', 'pytest', '-v', '-k', 'test_init',
        #    '/workspace/holoscan-sdk/public/python/tests']
        # whereas the Python interpreter (sys.argv) will get the following arguments:
        #   ['/usr/lib/python3/dist-packages/pytest.py', '-v', '-k', 'test_init',
        #    '/workspace/holoscan-sdk/public/python/tests']
        # For the reason above, we initialize the application with the arguments:
        #   [sys.executable, *sys.argv]
        # which will be equivalent to the following command line arguments:
        #   ['/usr/bin/python3', '/usr/lib/python3/dist-packages/pytest.py', '-v', '-k',
        #    'test_init', '/workspace/holoscan-sdk/public/python/tests']
        # and ``Application().argv`` will return the same arguments as ``sys.argv``.

        if not argv:
            argv = [sys.executable, *sys.argv]

        # It is recommended to not use super()
        # (https://pybind11.readthedocs.io/en/stable/advanced/classes.html#overriding-virtual-functions-in-python)
        _Application.__init__(self, argv, *args, **kwargs)
        self._async_executor = None
        self._async_executor_lock = _threading.Lock()
        self._start_op = None

    def run_async(self):
        """Run the application asynchronously using a shared executor.

        This method uses a shared ThreadPoolExecutor associated with this
        Application instance. The executor is created on the first call.
        Call `shutdown_async_executor()` when done with async runs
        to clean up resources.

        Returns
        -------
        future : ``concurrent.futures.Future`` object
        """
        # Ensure only one thread creates the executor
        with self._async_executor_lock:
            if self._async_executor is None:
                # Create the executor ONCE
                self._async_executor = ThreadPoolExecutor(
                    max_workers=1, thread_name_prefix=f"HoloscanApp_{self.name}_Async"
                )

        # Submit the job to the shared executor
        return self._async_executor.submit(self.run)

    def shutdown_async_executor(self, wait=True):
        """Shuts down the shared asynchronous executor.

        Call this method when the application instance is no longer needed
        and asynchronous runs initiated by `run_async` should terminate.

        Parameters
        ----------
        wait : bool
            If True (default), wait for running tasks to complete before shutting down.
            If False, shut down immediately.
        """
        # Use the lock to prevent race conditions with run_async
        with self._async_executor_lock:
            if self._async_executor is not None:
                # Shutting down async executor
                self._async_executor.shutdown(wait=wait)
                self._async_executor = None

    def start_op(self):
        """Get or create the start operator for this application.

        This operator is nothing but the first operator that was added to the application.
        It has the name of `<|start|>` and has a condition of `CountCondition(1)`.
        This Operator is used to start the execution of the application.
        Entry operators who want to start the execution of the application should connect to this
        operator.

        If this method is not called, no start operator is created.
        Otherwise, the start operator is created if it does not exist, and the start operator is
        returned.

        Returns
        -------
        Operator
            The start operator instance. If it doesn't exist, it will be created with
            a CountCondition(1).
        """
        from ..conditions import CountCondition  # noqa: PLC0415

        if not self._start_op:
            self._start_op = Operator(self, CountCondition(self, 1), name=START_OPERATOR_NAME)
            self.add_operator(self._start_op)
        return self._start_op

    # If we created a context via `gxf.context_create` then we would need to
    # call `gxf.context_destroy` in a destructor. However, in the __init__
    # here, the C++ API creates the GXFExecutor and its context and the
    # C++ object will also take care of the context deletion.

    # def __del__(self):
    #     context_destroy(self._context)

    def __del__(self):
        # This is best-effort cleanup, not guaranteed to be called reliably.
        # Avoid potentially blocking calls or complex logic here.
        if self._async_executor is not None:
            # Non-blocking shutdown is safer in __del__ if possible,
            # but might leave work unfinished or resources dangling longer.
            # Using wait=False might be preferable here, but check implications.
            try:
                self._async_executor.shutdown(wait=False)  # Try non-blocking first
            except Exception as e:
                logger.error(
                    f"Error during __del__ executor shutdown for Application {self.name}: {e}",
                    exc_info=True,
                )
            finally:
                self._async_executor = None


# copy docstrings defined in core_pydoc.hpp
Application.__doc__ = _Application.__doc__
Application.__init__.__doc__ = _Application.__init__.__doc__
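
For reference, a minimal usage sketch of the Application API defined above (illustrative only, not part of the packaged file); it assumes the stock PingTxOp/PingRxOp operators and CountCondition that ship in holoscan.operators and holoscan.conditions:

    from holoscan.conditions import CountCondition
    from holoscan.core import Application
    from holoscan.operators import PingRxOp, PingTxOp

    class MyPingApp(Application):
        def compose(self):
            # Transmit ten values, then let the scheduler wind the pipeline down.
            tx = PingTxOp(self, CountCondition(self, 10), name="tx")
            rx = PingRxOp(self, name="rx")
            self.add_flow(tx, rx)

    app = MyPingApp()
    future = app.run_async()       # returns a concurrent.futures.Future
    future.result()                # block until the run completes
    app.shutdown_async_executor()  # release the shared ThreadPoolExecutor

Calling run() directly remains the synchronous path; run_async() simply submits run to the per-application single-worker executor created above.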


class Fragment(_Fragment):
    def __init__(self, app=None, name="", *args, **kwargs):
        if app is not None and not isinstance(app, _Application):
            raise ValueError(
                "The first argument to a Fragment's constructor must be the Application "
                "to which it belongs."
            )
        # It is recommended to not use super()
        # (https://pybind11.readthedocs.io/en/stable/advanced/classes.html#overriding-virtual-functions-in-python)
        _Fragment.__init__(self, self, *args, **kwargs)

        self.name = name
        self.application = app
        # Set the fragment config to the application config.
        if app:
            self.config(app.config())
        self._async_executor = None
        self._async_executor_lock = _threading.Lock()
        self._start_op = None
        # Initialize the Python service registry for PyFragment
        self._python_service_registry = {}

    def compose(self):
        pass

    def run_async(self):
        """Run the fragment asynchronously using a shared executor.

        This method uses a shared ThreadPoolExecutor associated with this
        Application instance. The executor is created on the first call.
        Call `shutdown_async_executor()` when done with async runs
        to clean up resources.

        Returns
        -------
        future : ``concurrent.futures.Future`` object
        """
        # Ensure only one thread creates the executor
        with self._async_executor_lock:
            if self._async_executor is None:
                # Create the executor ONCE
                self._async_executor = ThreadPoolExecutor(
                    max_workers=1, thread_name_prefix=f"HoloscanFragment_{self.name}_Async"
                )

        # Submit the job to the shared executor
        return self._async_executor.submit(self.run)

    def shutdown_async_executor(self, wait=True):
        """Shuts down the shared asynchronous executor.

        Call this method when the application instance is no longer needed
        and asynchronous runs initiated by `run_async` should terminate.

        Parameters
        ----------
        wait : bool
            If True (default), wait for running tasks to complete before shutting down.
            If False, shut down immediately.
        """
        # Use the lock to prevent race conditions with run_async
        with self._async_executor_lock:
            if self._async_executor is not None:
                # Shutting down async executor
                self._async_executor.shutdown(wait=wait)
                self._async_executor = None

    def start_op(self):
        """Get or create the start operator for this fragment.

        This operator is nothing but the first operator that was added to the fragment.
        It has the name of `<|start|>` and has a condition of `CountCondition(1)`.
        This Operator is used to start the execution of the fragment.
        Entry operators who want to start the execution of the fragment should connect to this
        operator.

        If this method is not called, no start operator is created.
        Otherwise, the start operator is created if it does not exist, and the start operator is
        returned.

        Returns
        -------
        Operator
            The start operator instance. If it doesn't exist, it will be created with
            a CountCondition(1).
        """
        from ..conditions import CountCondition  # noqa: PLC0415

        if not self._start_op:
            self._start_op = Operator(self, CountCondition(self, 1), name=START_OPERATOR_NAME)
            self.add_operator(self._start_op)
        return self._start_op

    def __del__(self):
        # This is best-effort cleanup, not guaranteed to be called reliably.
        # Avoid potentially blocking calls or complex logic here.
        if self._async_executor is not None:
            # Non-blocking shutdown is safer in __del__ if possible,
            # but might leave work unfinished or resources dangling longer.
            # Using wait=False might be preferable here, but check implications.
            try:
                self._async_executor.shutdown(wait=False)  # Try non-blocking first
            except Exception as e:
                logger.error(
                    f"Error during __del__ executor shutdown for Fragment {self.name}: {e}",
                    exc_info=True,
                )
            finally:
                self._async_executor = None


# copy docstrings defined in core_pydoc.hpp
Fragment.__doc__ = _Fragment.__doc__
Fragment.__init__.__doc__ = _Fragment.__init__.__doc__
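
Fragments are the unit used to split a distributed application across processes or machines. A hedged sketch of how the Fragment class above is typically composed into a two-fragment application (illustrative only, not part of the packaged file; it assumes the stock ping operators and their "out"/"in" port names):

    from holoscan.conditions import CountCondition
    from holoscan.core import Application, Fragment
    from holoscan.operators import PingRxOp, PingTxOp

    class TxFragment(Fragment):
        def compose(self):
            tx = PingTxOp(self, CountCondition(self, 10), name="tx")
            self.add_operator(tx)

    class RxFragment(Fragment):
        def compose(self):
            rx = PingRxOp(self, name="rx")
            self.add_operator(rx)

    class DistributedPingApp(Application):
        def compose(self):
            tx_fragment = TxFragment(self, name="tx_fragment")
            rx_fragment = RxFragment(self, name="rx_fragment")
            # Connect the "out" port of tx in one fragment to the "in" port of rx in the other.
            self.add_flow(tx_fragment, rx_fragment, {("tx.out", "rx.in")})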


class Operator(_Operator):
    _readonly_attributes = [
        "fragment",
        "conditions",
        "resources",
        "operator_type",
        "description",
    ]

    def __setattr__(self, name, value):
        if name in self._readonly_attributes:
            raise AttributeError(f'cannot override read-only property "{name}"')
        super().__setattr__(name, value)

    def __init__(self, fragment, *args, **kwargs):
        if not isinstance(fragment, _Fragment):
            raise ValueError(
                "The first argument to an Operator's constructor must be the Fragment "
                "(Application) to which it belongs."
            )
        # It is recommended to not use super()
        # (https://pybind11.readthedocs.io/en/stable/advanced/classes.html#overriding-virtual-functions-in-python)
        _Operator.__init__(self, self, fragment, *args, **kwargs)
        # Create a PyOperatorSpec object and pass it to the C++ API
        spec = OperatorSpec(fragment=self.fragment, op=self)
        self.spec = spec
        # Call setup method in PyOperator class
        self.setup(spec)

    def setup(self, spec: OperatorSpec):
        """Default implementation of setup method."""
        pass

    def initialize(self):
        """Default implementation of initialize"""
        pass

    def start(self):
        """Default implementation of start"""
        pass

    def compute(self, op_input, op_output, context):
        """Default implementation of compute"""
        pass

    def stop(self):
        """Default implementation of stop"""
        pass


# copy docstrings defined in core_pydoc.hpp
Operator.__doc__ = _Operator.__doc__
Operator.__init__.__doc__ = _Operator.__init__.__doc__
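
For context, a native Python operator subclasses the Operator wrapper above and overrides setup() and compute(). A small sketch (the ScaleOp name and its factor parameter are hypothetical, not part of this package):

    from holoscan.core import Operator, OperatorSpec

    class ScaleOp(Operator):
        """Hypothetical operator that multiplies each received value by a constant factor."""

        def __init__(self, fragment, *args, factor=2.0, **kwargs):
            self.factor = factor
            # The wrapper's __init__ calls setup() internally, so set attributes before this call.
            super().__init__(fragment, *args, **kwargs)

        def setup(self, spec: OperatorSpec):
            spec.input("in")
            spec.output("out")

        def compute(self, op_input, op_output, context):
            value = op_input.receive("in")
            op_output.emit(value * self.factor, "out")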


class Condition(_Condition):
    _readonly_attributes = [
        "fragment",
        "condition_type",
        "description",
    ]

    def __setattr__(self, name, value):
        if name in self._readonly_attributes:
            raise AttributeError(f'cannot override read-only property "{name}"')
        super().__setattr__(name, value)

    def __init__(self, fragment, *args, **kwargs):
        if not isinstance(fragment, _Fragment):
            raise ValueError(
                "The first argument to a Condition's constructor must be the Fragment "
                "(Application) to which it belongs."
            )
        # It is recommended to not use super()
        # (https://pybind11.readthedocs.io/en/stable/advanced/classes.html#overriding-virtual-functions-in-python)
        _Condition.__init__(self, self, fragment, *args, **kwargs)
        # Create a PyComponentSpec object and pass it to the C++ API
        spec = ComponentSpec(fragment=self.fragment, component=self)
        self.spec = spec
        # Call setup method in PyCondition class
        self.setup(spec)

    def setup(self, spec: ComponentSpec):
        """Default implementation of setup method."""
        pass

    def initialize(self):
        """Default implementation of initialize"""
        pass

    def update_state(self, timestamp):
        """Default implementation of update_state

        Parameters
        ----------
        timestamp : int
            The timestamp at which the update_state method was called.

        Notes
        -----
        This method is always called by the underlying GXF framework immediately before the
        `Condition.check` method. In some cases, the `Condition.on_execute` method may also wish
        to call this method.
        """
        pass

    def check(self, timestamp: int) -> tuple[SchedulingStatusType, int | None]:
        """Default implementation of check.

        Parameters
        ----------
        timestamp : int
            The timestamp at which the check method is called. This method is called by the
            underlying GXF framework to determine whether an operator is ready to execute.

        Returns
        -------
        status_type : SchedulingStatusType
            The current status of the operator. See the documentation on native condition
            creation for explanations of the various status types.
        target_timestamp : int or None
            Specifies a specific target timestamp at which the operator is expected to be ready.
            This should only be provided if relevant (it helps the underlying framework avoid
            overhead of repeated checks before the target time).

        Notes
        -----
        The method should return SchedulingStatusType.READY when the desired condition has been
        met.

        With this default implementation, the operator will always be considered ready to
        execute.
        """
        return SchedulingStatusType.READY, None

    def on_execute(self, timestamp):
        """Default implementation of on_execute

        Parameters
        ----------
        timestamp : int
            The timestamp at which the on_execute method was called.

        Notes
        -----
        This method is called by the underlying GXF framework immediately after the
        `Operator.compute` call for the operator to which the condition has been assigned.
        """
        pass


# copy docstrings defined in core_pydoc.hpp
Condition.__doc__ = _Condition.__doc__
Condition.__init__.__doc__ = _Condition.__init__.__doc__
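
For context, a native Python condition overrides check() (and optionally update_state()/on_execute()) from the Condition wrapper above. A hedged sketch of a count-style condition (hypothetical; attach it by passing the instance to an operator's constructor):

    from holoscan.core import Condition, SchedulingStatusType

    class NativeCountCondition(Condition):
        """Hypothetical pure-Python condition that permits a fixed number of executions."""

        def __init__(self, fragment, *args, count=10, **kwargs):
            self._remaining = count
            super().__init__(fragment, *args, **kwargs)

        def check(self, timestamp):
            if self._remaining > 0:
                return SchedulingStatusType.READY, None
            # NEVER tells the scheduler that this operator will not become ready again.
            return SchedulingStatusType.NEVER, None

        def on_execute(self, timestamp):
            # Called immediately after the associated operator's compute() call.
            self._remaining -= 1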


class Resource(_Resource):
    _readonly_attributes = [
        "fragment",
        "resource_type",
        "description",
    ]

    def __setattr__(self, name, value):
        if name in self._readonly_attributes:
            raise AttributeError(f'cannot override read-only property "{name}"')
        super().__setattr__(name, value)

    def __init__(self, fragment, *args, **kwargs):
        if not isinstance(fragment, _Fragment):
            raise ValueError(
                "The first argument to a Resource's constructor must be the Fragment "
                "(Application) to which it belongs."
            )
        # It is recommended to not use super()
        # (https://pybind11.readthedocs.io/en/stable/advanced/classes.html#overriding-virtual-functions-in-python)
        _Resource.__init__(self, self, fragment, *args, **kwargs)
        # Create a PyComponentSpec object and pass it to the C++ API
        spec = ComponentSpec(fragment=self.fragment, component=self)
        self.spec = spec
        # Call setup method in PyResource class
        self.setup(spec)

    def setup(self, spec: ComponentSpec):
        """Default implementation of setup method."""
        pass


# copy docstrings defined in core_pydoc.hpp
Resource.__doc__ = _Resource.__doc__
Resource.__init__.__doc__ = _Resource.__init__.__doc__
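
Native Python resources follow the same pattern: subclass the Resource wrapper above and declare parameters in setup(). A minimal, hypothetical sketch (the class name and its "gain" parameter are illustrative, and it assumes ComponentSpec.param behaves as in the SDK's native resource examples):

    from holoscan.core import ComponentSpec, Resource

    class GainResource(Resource):
        """Hypothetical native resource exposing a single configurable parameter."""

        def setup(self, spec: ComponentSpec):
            # Declare a parameter named "gain" with a default value of 2.0.
            spec.param("gain", 2.0)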


class DefaultFragmentService(_DefaultFragmentService):
    """Base class for fragment services in Python.

    Provides default implementations of virtual methods to avoid
    infinite recursion issues with pybind11 trampolines.
    """

    def __init__(self, resource=None, *args, **kwargs):
        """Initialize the fragment service.

        Parameters
        ----------
        resource : Resource, optional
            The underlying resource for this service.
        """
        # Call the C++ base class constructor
        if resource is not None:
            _DefaultFragmentService.__init__(self, resource, *args, **kwargs)
        else:
            _DefaultFragmentService.__init__(self, *args, **kwargs)
        self._resource_ref = resource

    def resource(self, new_resource=None):
        """Get or set the underlying Resource associated with this service.

        This method is called by the C++ backend.

        Parameters
        ----------
        new_resource : Resource or None
            If provided, sets the resource. If None, acts as getter.

        Returns
        -------
        Resource or None
            The associated resource when called as a getter.
        """
        if new_resource is not None:
            self._resource_ref = new_resource
            # We also need to call the C++ base class's resource setter
            super().resource(new_resource)
        return self._resource_ref


class Tracker:
    """Context manager to add data flow tracking to an application."""

    def __init__(
        self,
        app,
        *,
        filename=None,
        num_buffered_messages=100,
        num_start_messages_to_skip=10,
        num_last_messages_to_discard=10,
        latency_threshold=0,
        is_limited_tracking=False,
    ):
        """
        Parameters
        ----------
        app : holoscan.core.Application
            The application on which flow tracking should be applied.
        filename : str or None, optional
            If None, logging to file will be disabled. Otherwise, logging will
            write to the specified file.
        num_buffered_messages : int, optional
            Controls the number of messages buffered between file writing when
            `filename` is not ``None``.
        num_start_messages_to_skip : int, optional
            The number of messages to skip at the beginning of the execution. This does not affect
            the log file or the number of source messages metric.
        num_last_messages_to_discard : int, optional
            The number of messages to discard at the end of the execution. This does not affect
            the log file or the number of source messages metric.
        latency_threshold : int, optional
            The minimum end-to-end latency in milliseconds to account for in the end-to-end
            latency metric calculations.
        is_limited_tracking : bool, optional
            If true, the tracking is limited to root and leaf nodes, minimizing the timestamps by
            avoiding intermediate operators.
        """
        self.app = app

        # Check the number of fragment nodes to see if it is a distributed app.
        # Use compose_graph(), not compose(), to protect against repeated compose() calls.
        self.app.compose_graph()
        self.is_distributed_app = len(app.fragment_graph.get_nodes()) > 0

        self.enable_logging = filename is not None
        if self.enable_logging:
            self.logging_kwargs = dict(
                filename=filename,
                num_buffered_messages=num_buffered_messages,
            )
        self.tracker_kwargs = dict(
            num_start_messages_to_skip=num_start_messages_to_skip,
            num_last_messages_to_discard=num_last_messages_to_discard,
            latency_threshold=latency_threshold,
            is_limited_tracking=is_limited_tracking,
        )

    def __enter__(self):
        if self.is_distributed_app:
            self.trackers = self.app.track_distributed(**self.tracker_kwargs)
            for tracker in self.trackers.values():
                if self.enable_logging:
                    tracker.enable_logging(**self.logging_kwargs)
            return self.trackers
        else:
            self.tracker = self.app.track(**self.tracker_kwargs)
            if self.enable_logging:
                self.tracker.enable_logging(**self.logging_kwargs)
            return self.tracker

    def __exit__(self, exc_type, exc_value, exc_tb):
        if self.enable_logging:
            if self.is_distributed_app:
                for tracker in self.trackers.values():
                    tracker.end_logging()
            else:
                self.tracker.end_logging()
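
For reference, Tracker wraps the data flow tracking API as a context manager. A short usage sketch (illustrative only; MyPingApp stands for any Application subclass, and tracker.print() assumes the DataFlowTracker.print method from the SDK):

    app = MyPingApp()
    with Tracker(app, filename="tracker.log", num_start_messages_to_skip=5) as tracker:
        app.run()
        tracker.print()  # summarize the collected end-to-end latency metrics

For a distributed application, __enter__ instead returns a dict mapping fragment names to their trackers, as shown above.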


_registry_context = _RegistryContext()
io_type_registry = _registry_context.registry()

_register_types(io_type_registry)