holoscan-2.6.0-cp312-cp312-manylinux_2_35_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (126)
  1. holoscan-2.6.0.data/purelib/holoscan/__init__.py +133 -0
  2. holoscan-2.6.0.data/purelib/holoscan/cli/__init__.py +32 -0
  3. holoscan-2.6.0.data/purelib/holoscan/cli/__main__.py +159 -0
  4. holoscan-2.6.0.data/purelib/holoscan/cli/common/argparse_types.py +153 -0
  5. holoscan-2.6.0.data/purelib/holoscan/cli/common/artifact_sources.py +149 -0
  6. holoscan-2.6.0.data/purelib/holoscan/cli/common/constants.py +119 -0
  7. holoscan-2.6.0.data/purelib/holoscan/cli/common/dockerutils.py +509 -0
  8. holoscan-2.6.0.data/purelib/holoscan/cli/common/enum_types.py +52 -0
  9. holoscan-2.6.0.data/purelib/holoscan/cli/common/exceptions.py +130 -0
  10. holoscan-2.6.0.data/purelib/holoscan/cli/common/sdk_utils.py +180 -0
  11. holoscan-2.6.0.data/purelib/holoscan/cli/common/utils.py +130 -0
  12. holoscan-2.6.0.data/purelib/holoscan/cli/logging.json +37 -0
  13. holoscan-2.6.0.data/purelib/holoscan/cli/nics/__init__.py +18 -0
  14. holoscan-2.6.0.data/purelib/holoscan/cli/nics/nics.py +34 -0
  15. holoscan-2.6.0.data/purelib/holoscan/cli/packager/__init__.py +18 -0
  16. holoscan-2.6.0.data/purelib/holoscan/cli/packager/arguments.py +137 -0
  17. holoscan-2.6.0.data/purelib/holoscan/cli/packager/config_reader.py +181 -0
  18. holoscan-2.6.0.data/purelib/holoscan/cli/packager/container_builder.py +368 -0
  19. holoscan-2.6.0.data/purelib/holoscan/cli/packager/manifest_files.py +220 -0
  20. holoscan-2.6.0.data/purelib/holoscan/cli/packager/models.py +93 -0
  21. holoscan-2.6.0.data/purelib/holoscan/cli/packager/package_command.py +189 -0
  22. holoscan-2.6.0.data/purelib/holoscan/cli/packager/packager.py +122 -0
  23. holoscan-2.6.0.data/purelib/holoscan/cli/packager/parameters.py +558 -0
  24. holoscan-2.6.0.data/purelib/holoscan/cli/packager/platforms.py +402 -0
  25. holoscan-2.6.0.data/purelib/holoscan/cli/packager/templates/Dockerfile.jinja2 +470 -0
  26. holoscan-2.6.0.data/purelib/holoscan/cli/packager/templates/dockerignore +93 -0
  27. holoscan-2.6.0.data/purelib/holoscan/cli/packager/templates/tools.sh +416 -0
  28. holoscan-2.6.0.data/purelib/holoscan/cli/py.typed +0 -0
  29. holoscan-2.6.0.data/purelib/holoscan/cli/runner/__init__.py +18 -0
  30. holoscan-2.6.0.data/purelib/holoscan/cli/runner/resources.py +174 -0
  31. holoscan-2.6.0.data/purelib/holoscan/cli/runner/run_command.py +203 -0
  32. holoscan-2.6.0.data/purelib/holoscan/cli/runner/runner.py +306 -0
  33. holoscan-2.6.0.data/purelib/holoscan/cli/version/__init__.py +18 -0
  34. holoscan-2.6.0.data/purelib/holoscan/cli/version/version.py +50 -0
  35. holoscan-2.6.0.data/purelib/holoscan/conditions/__init__.py +57 -0
  36. holoscan-2.6.0.data/purelib/holoscan/core/__init__.py +430 -0
  37. holoscan-2.6.0.data/purelib/holoscan/decorator.py +592 -0
  38. holoscan-2.6.0.data/purelib/holoscan/executors/__init__.py +26 -0
  39. holoscan-2.6.0.data/purelib/holoscan/graphs/__init__.py +32 -0
  40. holoscan-2.6.0.data/purelib/holoscan/gxf/__init__.py +57 -0
  41. holoscan-2.6.0.data/purelib/holoscan/lib/gxf_extensions/libgxf_holoscan_wrapper.so +0 -0
  42. holoscan-2.6.0.data/purelib/holoscan/lib/gxf_extensions/libgxf_holoscan_wrapper_lib.so +0 -0
  43. holoscan-2.6.0.data/purelib/holoscan/lib/gxf_extensions/libgxf_ucx_holoscan.so +0 -0
  44. holoscan-2.6.0.data/purelib/holoscan/lib/gxf_extensions/libgxf_ucx_holoscan_lib.so +0 -0
  45. holoscan-2.6.0.data/purelib/holoscan/lib/libgxf_app.so +0 -0
  46. holoscan-2.6.0.data/purelib/holoscan/lib/libgxf_core.so +0 -0
  47. holoscan-2.6.0.data/purelib/holoscan/lib/libgxf_cuda.so +0 -0
  48. holoscan-2.6.0.data/purelib/holoscan/lib/libgxf_logger.so +0 -0
  49. holoscan-2.6.0.data/purelib/holoscan/lib/libgxf_multimedia.so +0 -0
  50. holoscan-2.6.0.data/purelib/holoscan/lib/libgxf_rmm.so +0 -0
  51. holoscan-2.6.0.data/purelib/holoscan/lib/libgxf_sample.so +0 -0
  52. holoscan-2.6.0.data/purelib/holoscan/lib/libgxf_serialization.so +0 -0
  53. holoscan-2.6.0.data/purelib/holoscan/lib/libgxf_std.so +0 -0
  54. holoscan-2.6.0.data/purelib/holoscan/lib/libgxf_ucx.so +0 -0
  55. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_core.so.2.6.0 +0 -0
  56. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_infer.so.2.6.0 +0 -0
  57. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_infer_onnx_runtime.so.2.6.0 +0 -0
  58. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_infer_torch.so.2.6.0 +0 -0
  59. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_infer_utils.so.2.6.0 +0 -0
  60. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_logger.so.2.6.0 +0 -0
  61. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_aja.so.2.6.0 +0 -0
  62. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_async_ping_rx.so.2.6.0 +0 -0
  63. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_async_ping_tx.so.2.6.0 +0 -0
  64. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_bayer_demosaic.so.2.6.0 +0 -0
  65. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_format_converter.so.2.6.0 +0 -0
  66. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_gxf_codelet.so.2.6.0 +0 -0
  67. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_holoviz.so.2.6.0 +0 -0
  68. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_inference.so.2.6.0 +0 -0
  69. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_inference_processor.so.2.6.0 +0 -0
  70. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_ping_rx.so.2.6.0 +0 -0
  71. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_ping_tensor_rx.so.2.6.0 +0 -0
  72. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_ping_tensor_tx.so.2.6.0 +0 -0
  73. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_ping_tx.so.2.6.0 +0 -0
  74. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_segmentation_postprocessor.so.2.6.0 +0 -0
  75. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_v4l2.so.2.6.0 +0 -0
  76. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_video_stream_recorder.so.2.6.0 +0 -0
  77. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_op_video_stream_replayer.so.2.6.0 +0 -0
  78. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_profiler.so.2.6.0 +0 -0
  79. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_spdlog_logger.so.2.6.0 +0 -0
  80. holoscan-2.6.0.data/purelib/holoscan/lib/libholoscan_viz.so.2.6.0 +0 -0
  81. holoscan-2.6.0.data/purelib/holoscan/lib/libucm.so.0.0.0 +0 -0
  82. holoscan-2.6.0.data/purelib/holoscan/lib/libucp.so.0.0.0 +0 -0
  83. holoscan-2.6.0.data/purelib/holoscan/lib/libucs.so.0.0.0 +0 -0
  84. holoscan-2.6.0.data/purelib/holoscan/lib/libucs_signal.so.0.0.0 +0 -0
  85. holoscan-2.6.0.data/purelib/holoscan/lib/libuct.so.0.0.0 +0 -0
  86. holoscan-2.6.0.data/purelib/holoscan/lib/libyaml-cpp.so.0.7.0 +0 -0
  87. holoscan-2.6.0.data/purelib/holoscan/lib/ucx/libucm_cuda.so.0.0.0 +0 -0
  88. holoscan-2.6.0.data/purelib/holoscan/lib/ucx/libucs_fuse.so.0.0.0 +0 -0
  89. holoscan-2.6.0.data/purelib/holoscan/lib/ucx/libuct_cma.so.0.0.0 +0 -0
  90. holoscan-2.6.0.data/purelib/holoscan/lib/ucx/libuct_cuda.so.0.0.0 +0 -0
  91. holoscan-2.6.0.data/purelib/holoscan/lib/ucx/libuct_cuda_gdrcopy.so.0.0.0 +0 -0
  92. holoscan-2.6.0.data/purelib/holoscan/lib/ucx/libuct_ib.so.0.0.0 +0 -0
  93. holoscan-2.6.0.data/purelib/holoscan/lib/ucx/libuct_rdmacm.so.0.0.0 +0 -0
  94. holoscan-2.6.0.data/purelib/holoscan/lib/ucx/libuct_xpmem.so.0.0.0 +0 -0
  95. holoscan-2.6.0.data/purelib/holoscan/lib/ucx/libucx_perftest_cuda.so.0.0.0 +0 -0
  96. holoscan-2.6.0.data/purelib/holoscan/logger/__init__.py +37 -0
  97. holoscan-2.6.0.data/purelib/holoscan/network_contexts/__init__.py +28 -0
  98. holoscan-2.6.0.data/purelib/holoscan/operators/__init__.py +97 -0
  99. holoscan-2.6.0.data/purelib/holoscan/operators/aja_source/__init__.py +22 -0
  100. holoscan-2.6.0.data/purelib/holoscan/operators/bayer_demosaic/__init__.py +24 -0
  101. holoscan-2.6.0.data/purelib/holoscan/operators/format_converter/__init__.py +23 -0
  102. holoscan-2.6.0.data/purelib/holoscan/operators/gxf_codelet/__init__.py +67 -0
  103. holoscan-2.6.0.data/purelib/holoscan/operators/holoviz/__init__.py +424 -0
  104. holoscan-2.6.0.data/purelib/holoscan/operators/inference/__init__.py +23 -0
  105. holoscan-2.6.0.data/purelib/holoscan/operators/inference_processor/__init__.py +23 -0
  106. holoscan-2.6.0.data/purelib/holoscan/operators/ping_rx/__init__.py +45 -0
  107. holoscan-2.6.0.data/purelib/holoscan/operators/ping_tensor_rx/__init__.py +22 -0
  108. holoscan-2.6.0.data/purelib/holoscan/operators/ping_tensor_tx/__init__.py +22 -0
  109. holoscan-2.6.0.data/purelib/holoscan/operators/ping_tx/__init__.py +46 -0
  110. holoscan-2.6.0.data/purelib/holoscan/operators/segmentation_postprocessor/__init__.py +23 -0
  111. holoscan-2.6.0.data/purelib/holoscan/operators/v4l2_video_capture/__init__.py +23 -0
  112. holoscan-2.6.0.data/purelib/holoscan/operators/video_stream_recorder/__init__.py +22 -0
  113. holoscan-2.6.0.data/purelib/holoscan/operators/video_stream_replayer/__init__.py +22 -0
  114. holoscan-2.6.0.data/purelib/holoscan/resources/__init__.py +147 -0
  115. holoscan-2.6.0.data/purelib/holoscan/schedulers/__init__.py +32 -0
  116. holoscan-2.6.0.data/purelib/holoscan-2.6.0.pth +1 -0
  117. holoscan-2.6.0.dist-info/LICENSE.txt +202 -0
  118. holoscan-2.6.0.dist-info/METADATA +125 -0
  119. holoscan-2.6.0.dist-info/NOTICE.txt +174 -0
  120. holoscan-2.6.0.dist-info/NVIDIA-AI-PRODUCT-EULA.txt +243 -0
  121. holoscan-2.6.0.dist-info/RECORD +126 -0
  122. holoscan-2.6.0.dist-info/WHEEL +5 -0
  123. holoscan-2.6.0.dist-info/axle.lck +0 -0
  124. holoscan-2.6.0.dist-info/entry_points.txt +3 -0
  125. holoscan-2.6.0.dist-info/symlinks.txt +82 -0
  126. holoscan-2.6.0.dist-info/top_level.txt +3 -0
holoscan-2.6.0.data/purelib/holoscan/decorator.py
@@ -0,0 +1,592 @@
+ """
+ SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ """  # noqa: E501
+
+ """This module provides a decorator API for creating Python Operators
+
+ .. autosummary::
+
+     holoscan.decorator.create_op
+     holoscan.decorator.Input
+     holoscan.decorator.Output
+ """
+
+ import ast
+ import inspect
+ import textwrap
+ from dataclasses import dataclass, field
+ from typing import Any, Optional, Union
+
+ import cupy as cp
+ import numpy as np
+
+ from holoscan.conditions import BooleanCondition
+ from holoscan.core import (
+     Condition,
+     ConditionType,
+     IOSpec,
+     Operator,
+     OperatorSpec,
+     Resource,
+ )
+ from holoscan.core._core import Fragment as FragmentBase
+ from holoscan.core._core import Tensor as TensorBase
+
+ __all__ = ["Input", "Output", "create_op"]
+
+
+ def _is_tensor_like(obj):
+     return (
+         (hasattr(obj, "__dlpack__") and hasattr(obj, "__dlpack_device__"))
+         or hasattr(obj, "__cuda_array_interface__")
+         or hasattr(obj, "__array_interface__")
+     )
+
+
+ def _as_python_tensor(tensor):
+     if hasattr(tensor, "__array_interface__") or (
+         hasattr(tensor, "__dlpack_device__") and tensor.__dlpack_device__()[0] == 1
+     ):
+         return np.asarray(tensor)
+     else:
+         return cp.asarray(tensor)
+
+
+ @dataclass
+ class Input:
+     """Class for specifying an input port and how the received value maps to a function's arguments.
+
+     Parameters
+     ----------
+     name : str
+         The name of the input port.
+     arg_map: str or dict[str, str]
+         If `arg_map` is a str, the Python object received by the input port is passed to the
+         function argument specified by `arg_map`. If `arg_map` is a dict, the input is assumed to be
+         a TensorMap (dictionary of tensors). In this case the keys of the dict are the tensor names
+         and the values are the names of the function arguments that the tensors map to.
+     condition_type : holoscan.core.ConditionType, optional
+         The condition type for the input port.
+     condition_kwargs : dict[str, Any], optional
+         The keywords passed onto the condition specified by `condition_type`.
+     connector_type : holoscan.core.IOSpec.ConnectorType, optional
+         The connector type for the input port.
+     connector_kwargs : dict[str, Any], optional
+         The keywords passed onto the connector specified by `connector_type`.
+     """
+
+     name: str
+     arg_map: Optional[Union[str, dict[str, str]]] = ()
+     condition_type: Optional[ConditionType] = None
+     condition_kwargs: dict[str, Any] = field(default_factory=dict)
+     connector_type: Optional[IOSpec.ConnectorType] = None
+     connector_kwargs: dict[str, Any] = field(default_factory=dict)
+
+     def create_input(self, spec: OperatorSpec) -> IOSpec:
+         iospec = spec.input(self.name)
+         if self.condition_type is not None:
+             iospec = iospec.condition(self.condition_type, **self.condition_kwargs)
+         if self.connector_type is not None:
+             iospec = iospec.connector(self.connector_type, **self.connector_kwargs)
+
+
+ @dataclass
+ class Output:
+     """Class for specifying an output port and how one or more of a functions returned value(s) map
+     to it.
+
+     Parameters
+     ----------
+     name : str
+         The name of the output port.
+     tensor_names: str, tuple(str) or None
+         If None, whatever Python object the func outputs is emitted on the output port. If a tuple
+         of strings is provided it is assumed that the func returns a dictionary of tensors. The
+         names in the tuple specify which tensors in the dict will be transmitted on the output
+         port. There is no need to specify `tensor_names` if all tensors in a dict returned by the
+         function are to be transmitted. In the case of a single tensor name, a string can be
+         provided instead of a tuple.
+     condition_type : holoscan.core.ConditionType, optional
+         The condition type for the input port.
+     condition_kwargs : dict[str, Any], optional
+         The keywords passed onto the condition specified by `condition_type`.
+     connector_type : holoscan.core.IOSpec.ConnectorType, optional
+         The connector type for the input port.
+     connector_kwargs : dict[str, Any], optional
+         The keywords passed onto the connector specified by `connector_type`.
+     """
+
+     name: str
+     tensor_names: Optional[Union[str, tuple[str]]] = ()
+     condition_type: Optional[ConditionType] = None
+     condition_kwargs: dict[str, Any] = field(default_factory=dict)
+     connector_type: Optional[IOSpec.ConnectorType] = None
+     connector_kwargs: dict[str, Any] = field(default_factory=dict)
+
+     def create_output(self, spec: OperatorSpec) -> IOSpec:
+         iospec = spec.output(self.name)
+         if self.condition_type is not None:
+             iospec = iospec.condition(self.condition_type, **self.condition_kwargs)
+         if self.connector_type is not None:
+             iospec = iospec.connector(self.connector_type, **self.connector_kwargs)
+
+
+ def _as_input(input_: Union[str, Input]):
+     """Cast str to Output object."""
+     if isinstance(input_, str):
+         return Input(input_, arg_map=input_)
+     elif not isinstance(input_, Input):
+         return ValueError("`inputs` must be a single port name or Input object or a tuple of these")
+     return input_
+
+
+ def _as_output(output: Union[str, Output]):
+     """Cast str to Output object."""
+     if isinstance(output, str):
+         return Output(output)
+     elif not isinstance(output, Output):
+         return ValueError(
+             "`outputs` must be a single port name or Output object or a tuple of these"
+         )
+     return output
+
+
+ def _has_function_returns_value(func):
+     """Check if the provided function has any return statements returning a value."""
+
+     class ReturnVisitor(ast.NodeVisitor):
+         def __init__(self):
+             self.returns_value = False
+
+         def visit_Return(self, node):  # noqa: N802
+             # check if the return statement has a value
+             if node.value is not None:
+                 self.returns_value = True
+                 return
+
+             self.generic_visit(node)
+
+         def visit_ClassDef(self, node):  # noqa: N802
+             return
+
+         def visit_FunctionDef(self, node):  # noqa: N802
+             return
+
+         def visit_AsyncFunctionDef(self, node):  # noqa: N802
+             return
+
+         def visit(self, node):
+             if self.returns_value:
+                 return
+             super().visit(node)
+
+     # parse the source code into an AST
+     source_code = inspect.getsource(func)
+     # deindent the text if it is indented
+     source_code = textwrap.dedent(source_code)
+     tree = ast.parse(source_code)
+     # initialize the visitor
+     visitor = ReturnVisitor()
+     # walk the AST
+     for node in ast.walk(tree):
+         if isinstance(node, ast.FunctionDef) and node.name == func.__name__:
+             visitor.generic_visit(node)
+             break
+     return visitor.returns_value
+
+
+ def create_op(
+     function_or_class=None,
+     inputs: Union[str, Input, tuple[Union[str, Input]]] = (),
+     outputs: Union[str, Output, tuple[Union[str, Output]]] = (),
+     cast_tensors=True,
+ ):
+     """Decorator for creating an operator from a function or a class.
+
+     When the decorator is used on a class, the class must have a `__call__` method that will be
+     used as the operator function.
+
+     inputs : str, Input, or Tuple[str | Input], optional
+         If a str is provided, it is assumed to be the name of the input port and that the function
+         has a variable matching that port name to which the object received on the port will be
+         connected. If the port name does not match the name of the variable in the function
+         signature, or if there are multiple tensors to be mapped to multiple objects, use an Input
+         argument. A tuple of str or Input objects can be provided to specify multiple input ports.
+         The default of an empty tuple corresponds to no input ports.
+     outputs : str, Output, or Tuple[str | Output], optional
+         If a str is provided, any value returned by the function will be emitted on an output port
+         of that name. If a tuple of multiple str is provided and the function returns a tuple, then
+         the tuple elements will be emitted from each output port in the order at which they are
+         defined. In this case, the number of output ports should match the length of the output
+         tuple. Finally, an Output object can be provided in the case that the function returns a
+         dictionary of output arrays that should be split across multiple ports.
+     cast_tensors : bool, optional
+         If True, automatically cast any tensor-like input to a NumPy or CuPy array (for host and
+         device tensors, respectively). If set to False, these will be left as `holoscan.Tensor` and
+         the user will have to cast to the desired type within the body of the decorated function or
+         class.
+
+     Notes
+     -----
+     Another case where using `Input` or `Output` objects is necessary is if the user wishes to
+     override the default connector or condition types for the port.
+     """
+     # used to store the class object if the decorator is used on a class
+     class_obj = None
+     # used to determine if the decorator was used without args
+     is_without_args = function_or_class is not None
+
+     # convert scalars to tuple
+     if isinstance(inputs, (str, Input)):
+         inputs = (inputs,)
+     # convert any str in the tuple to an Input object
+     inputs = tuple(_as_input(i) for i in inputs)
+
+     if isinstance(outputs, (str, Output)):
+         outputs = (outputs,)
+     # convert any str in the tuple to an Output object
+     outputs = tuple(_as_output(o) for o in outputs)
+
+     if not isinstance(outputs, tuple):
+         raise ValueError(
+             "`outputs` must be a single port name or Output object or a tuple of these"
+         )
+
+     def decorator(func_or_cls):
+         nonlocal function_or_class, class_obj
+
+         def make_class(*args, **kwargs):
+             if "fragment" in kwargs:
+                 fragment = kwargs.pop("fragment")
+             elif len(args) and isinstance(args[0], FragmentBase):
+                 fragment, args = args[0], args[1:]
+             else:
+                 raise ValueError(
+                     "fragment must be provided via kwarg or as the first positional argument"
+                 )
+
+             # frame = inspect.currentframe()
+             # args_names, _, _, locals_dict = inspect.getargvalues(frame)
+             # print(f"{args_names=}, {locals_dict=}")
+
+             class DynamicOp(Operator):
+                 def __init__(
+                     self,
+                     fragment: FragmentBase,
+                     *args,
+                     inputs,
+                     outputs,
+                     cast_tensors=cast_tensors,
+                     **kwargs,
+                 ):
+                     self.func = func_or_cls
+                     self.input_objs = inputs
+                     self.output_objs = outputs
+                     self.is_generator = inspect.isgeneratorfunction(self.func)
+                     self.gen_obj = None
+                     self.cast_tensors = cast_tensors
+
+                     # remove conditions and resources from *args
+                     condition_args = tuple(a for a in args if isinstance(a, Condition))
+                     resource_args = tuple(a for a in args if isinstance(a, Resource))
+                     args = tuple(a for a in args if not isinstance(a, (Condition, Resource)))
+                     self.func_args = args
+
+                     # add a boolean condition to prevent triggering if the function is a generator
+                     # and the iteration is complete
+                     if self.is_generator:
+                         condition_args = condition_args + (
+                             BooleanCondition(fragment, name="_generator_func"),
+                         )
+
+                     # set name kwarg to self.func.__name__ if not provided
+                     name = kwargs.pop("name", self.func.__name__)
+
+                     argspec = inspect.getfullargspec(self.func)
+
+                     # remove self from argspec.args if the decorator is used on a class
+                     if class_obj:
+                         argspec = argspec._replace(args=argspec.args[1:])
+                         self.class_obj = class_obj
+
+                     self.func_argspec = argspec
+
+                     # populate inputs and outputs with defaults if decorator was used without args
+                     if is_without_args:
+                         self.input_objs = tuple(Input(name, arg_map=name) for name in argspec.args)
+                         # configure the output port if the function contains return statements
+                         # (in this case, the port name will be left empty)
+                         if _has_function_returns_value(function_or_class):
+                             self.output_objs = tuple((Output(""),))
+
+                     # populate all arguments not provided with defaults
+                     if argspec.kwonlydefaults is not None:
+                         for k in argspec.kwonlyargs:
+                             if k not in kwargs and k in argspec.kwonlydefaults:
+                                 kwargs[k] = argspec.kwonlydefaults[k]
+
+                     # store a list of what ports map to what function arguments
+                     self.input_mappings = {}
+                     for input_obj in self.input_objs:
+                         # store what argument(s) this input maps to
+                         self.input_mappings[input_obj.name] = input_obj.arg_map
+
+                     # sets self.dynamic_kwargs and self.fixed_kwargs
+                     self._set_fixed_and_dynamic_kwargs(kwargs)
+
+                     # get the type annotations dict for the function (not currently used)
+                     # self.func_annotations = inspect.get_annotations(self.func)
+                     self.func_annotations = self.func.__annotations__
+
+                     super().__init__(fragment, *condition_args, *resource_args, name=name)
+
+                 def _set_fixed_and_dynamic_kwargs(self, kwargs):
+                     """Split provided kwargs into those which are "fixed" and those which are
+                     "dynamic".
+
+                     Here "dynamic" refers to function arguments that are obtained from input
+                     ports. The keys for self.dynamic_kwargs are determined here, but the values
+                     are initialized to None. Actual values get set during each `compute` call.
+
+                     "fixed" refers to other keyword arguments to the function that don't change
+                     across calls.
+                     """
+                     self.dynamic_kwargs = {}
+                     for input_map in self.input_mappings.values():
+                         if isinstance(input_map, str):
+                             self._add_dynamic_arg(input_map, kwargs)
+                         elif isinstance(input_map, dict):
+                             for arg_name in input_map.values():
+                                 self._add_dynamic_arg(arg_name, kwargs)
+                     self.fixed_kwargs = kwargs
+
+                     # store any positional args with specified defaults in fixed_kwargs instead
+                     argspec = self.func_argspec
+                     if argspec.defaults is not None:
+                         n_default_positional = len(argspec.defaults)
+                         if n_default_positional > 0:
+                             self.func_args = self.func_args[:-n_default_positional]
+                         n_required_positional = len(argspec.args) - len(argspec.defaults)
+                         for k, v in zip(argspec.args[n_required_positional:], argspec.defaults):
+                             # don't overwrite any kwargs that were provided
+                             if k not in self.fixed_kwargs:
+                                 self.fixed_kwargs[k] = v
+
+                     # Now that all args with defaults are in self.fixed_kwargs we can check if any
+                     # of the required arguments were not specified
+                     required_args = set(argspec.args) | set(argspec.kwonlyargs)
+                     if argspec.kwonlydefaults is not None:
+                         required_args -= set(argspec.kwonlydefaults.keys())
+                     for arg in required_args:
+                         if arg not in self.fixed_kwargs and arg not in self.dynamic_kwargs:
+                             raise ValueError(f"required argument, '{arg}', has not been specified")
+
+                 def _add_dynamic_arg(self, arg_name, kwargs):
+                     """helper function for _set_fixed_and_dynamic_kwargs"""
+                     if arg_name in self.dynamic_kwargs:
+                         raise ValueError(
+                             "duplicate specification of mapping to function kwarg: '{arg_name}'"
+                         )
+                     self.dynamic_kwargs[arg_name] = None
+                     try:
+                         kwargs.pop(arg_name)
+                     except KeyError as e:
+                         argspec = self.func_argspec
+                         if arg_name not in argspec.kwonlyargs + argspec.args:
+                             msg = (
+                                 f"Provided func does not have an arg or kwarg named '{arg_name}'."
+                                 " The provided wrapped function has"
+                                 f" positional args: {argspec.args}"
+                                 f" and keyword-only args: {argspec.kwonlyargs}"
+                             )
+                             raise KeyError(msg) from e
+                         return
+
+                 # # not used by the Application, but can be useful to test the call
+                 # def __call__(self, *args, **kwargs):
+                 #     print(f"{self.msg=}")
+                 #     return self.func(*self.func_args, *args, **self.fixed_kwargs, **kwargs)
+
+                 def setup(self, spec: OperatorSpec):
+                     for input_obj in self.input_objs:
+                         input_obj.create_input(spec)
+
+                     self.output_tensor_map = {}
+                     for output_obj in self.output_objs:
+                         output_obj.create_output(spec)
+                         if isinstance(output_obj.tensor_names, str):
+                             output_obj.tensor_names = (output_obj.tensor_names,)
+                         self.output_tensor_map[output_obj.name] = tuple(output_obj.tensor_names)
+
+                 def compute(self, op_input, op_output, context):
+                     for port_name, arg_map in self.input_mappings.items():
+                         # print(f"input {port_name=}, {arg_map=}")
+                         msg = op_input.receive(port_name)
+                         if isinstance(arg_map, str):
+                             # print(f"{msg=}")
+                             if isinstance(msg, dict):
+                                 try:
+                                     # try tensor based on matching name
+                                     msg = msg[arg_map]
+                                 except KeyError as e:
+                                     # use tensor regardless of name if only one is present
+                                     tensors = tuple(
+                                         v for k, v in msg.items() if isinstance(v, TensorBase)
+                                     )
+                                     if len(tensors) == 1:
+                                         msg = tensors[0]
+                                     elif len(tensors) > 1:
+                                         raise ValueError(
+                                             "More than one tensor found in port, but none has "
+                                             f"name {arg_map}"
+                                         ) from e
+
+                             # cast holoscan.Tensor to cp.asarray(Tensor) here or require the user
+                             # to do it in the provided func?
+                             if self.cast_tensors and isinstance(msg, TensorBase):
+                                 msg = _as_python_tensor(msg)
+
+                             self.dynamic_kwargs[arg_map] = msg
+                         elif isinstance(arg_map, dict):
+                             for tensor_name, arg_name in arg_map.items():
+                                 try:
+                                     val = msg[tensor_name]
+                                 except KeyError as e:
+                                     raise KeyError(
+                                         f"key with name '{tensor_name}' not found in input dict"
+                                     ) from e
+                                 if self.cast_tensors and isinstance(val, TensorBase):
+                                     val = _as_python_tensor(val)
+                                 self.dynamic_kwargs[arg_name] = val
+
+                     if self.is_generator:
+                         if self.gen_obj is None:
+                             out = self.func(
+                                 *self.func_args, **self.fixed_kwargs, **self.dynamic_kwargs
+                             )
+                             self.gen_obj = out
+                         try:
+                             out = next(self.gen_obj)
+                         except StopIteration:
+                             # disable the condition to prevent further calls
+                             self.conditions["_generator_func"].disable_tick()
+                             return
+                     else:
+                         out = self.func(*self.func_args, **self.fixed_kwargs, **self.dynamic_kwargs)
+
+                     # if the output is a tuple and there is >1 port, we distribute the outputs
+                     if isinstance(out, tuple) and (len(self.output_tensor_map) > 1):
+                         # for tuple case, each port should correspond to each output tuple element
+                         if any([len(names) > 1 for names in self.output_tensor_map.values()]):
+                             raise ValueError(
+                                 "The function output was found to be a tuple type, but each "
+                                 "output tuple element must have its own port. In other words, "
+                                 "the `outputs` argument of `create_op` should be a tuple of port "
+                                 "names equal in length to the returned tuple."
+                             )
+                         # Make sure check that the output tuple length and number of ports match
+                         if len(out) != len(self.output_tensor_map):
+                             raise ValueError(
+                                 f"The number of output tuple elements and number of tensors must "
+                                 f"match.\n"
+                                 f"Output tuple length = {len(out)}\n"
+                                 f"Number of output tensors = {len(self.output_tensor_map)}"
+                             )
+                         for (port_name, tensor_names), out_element in zip(
+                             self.output_tensor_map.items(), out
+                         ):
+                             if _is_tensor_like(out_element):
+                                 name = "" if len(tensor_names) == 0 else tensor_names[0]
+                                 out_element = {name: out_element}
+                             op_output.emit(out_element, port_name)
+                         return
+
+                     for port_name, tensor_names in self.output_tensor_map.items():
+                         if tensor_names is None or len(tensor_names) == 0:
+                             if _is_tensor_like(out):
+                                 # emit as dict of tensor-like objects
+                                 out = {"": out}
+                             op_output.emit(out, port_name)
+                         elif len(tensor_names) == 1:
+                             name = tensor_names[0]
+                             if _is_tensor_like(out):
+                                 # emit as dict of tensor-like objects
+                                 out = {name: out}
+                                 op_output.emit(out, port_name)
+                             else:
+                                 if name not in out:
+                                     raise ValueError(
+                                         f"tensor with name '{name}' not found in function output"
+                                     )
+                                 op_output.emit({name: out[name]}, port_name)
+                         else:
+                             out_tensors = {}
+                             for name in tensor_names:
+                                 if name not in out:
+                                     raise ValueError(
+                                         f"tensor with name '{name}' not found in function output"
+                                     )
+                                 out_tensors[name] = out[name]
+                             # print(f"outputting tensors named: {tuple(out_tensors.keys())} on
+                             # port {port_name}")
+                             # print(f"tensormap emit of {out_tensors=}")
+                             op_output.emit(out_tensors, port_name)
+
+             op = DynamicOp(fragment, *args, inputs=inputs, outputs=outputs, **kwargs)
+
+             def _to_camel_case(name):
+                 """Convert name to camel case"""
+                 parts = name.split("_")
+                 return "".join(p.capitalize() for p in parts)
+
+             # manually update instead of using functools.update_wrapper(op, func_or_cls) because:
+             # - don't want to overwrite __doc__ with func.__doc__
+             # - want to use name instead of func.__name__
+             if class_obj:
+                 class_name = class_obj.__class__.__name__
+                 op.__name__ = class_name + "Op" if not class_name.endswith("Op") else class_name
+             else:
+                 op.__name__ = _to_camel_case(func_or_cls.__name__) + "Op"
+             op.__qualname__ = op.__name__
+             op.__module__ = func_or_cls.__module__
+             return op
+
+         def init_class(*args, **kwargs):
+             nonlocal class_obj, function_or_class
+             # create an instance of the class (using function_or_class as the class)
+             class_obj = function_or_class(*args, **kwargs)
+             # use the class's __call__ method as the operator function
+             if not callable(class_obj):
+                 raise ValueError(
+                     f"{function_or_class} must have a __call__ method to be used as an operator"
+                 )
+             function_or_class = class_obj.__call__
+             return decorator(function_or_class)
+
+         if func_or_cls is None:
+             return decorator
+
+         # check if the decorator was used on a class first
+         if inspect.isclass(func_or_cls):  # if isinstance(func_or_cls, type):
+             function_or_class = func_or_cls
+             return init_class
+
+         if callable(func_or_cls):
+             return make_class
+
+         raise Exception(f"Invalid usage of decorator for {func_or_cls}")
+
+     return decorator(function_or_class)
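
Example (not part of the package diff): a minimal sketch of how the decorator API above might be used, based only on the docstrings in holoscan/decorator.py shown in this hunk. The application, function, port, and tensor names (MyApp, scale_image, "in", "out", "scaled") are illustrative assumptions, as are the default port names assumed for the ping tensor operators.

    # Illustrative sketch only; assumes the holoscan 2.6.0 decorator API listed above.
    from holoscan.conditions import CountCondition
    from holoscan.core import Application
    from holoscan.decorator import Input, Output, create_op
    from holoscan.operators import PingTensorRxOp, PingTensorTxOp


    @create_op(
        inputs=Input("in", arg_map="image"),              # value on port "in" -> `image` argument
        outputs=Output("out", tensor_names=("scaled",)),  # emit the "scaled" tensor on port "out"
    )
    def scale_image(image, scale=2.0):
        # With the default cast_tensors=True, `image` arrives as a NumPy or CuPy array.
        return {"scaled": image * scale}


    class MyApp(Application):
        def compose(self):
            # assumed port names: PingTensorTxOp emits on "out", PingTensorRxOp receives on "in"
            tx = PingTensorTxOp(self, CountCondition(self, 10), name="tx")
            scale = scale_image(self, name="scale", scale=3.0)  # `scale` becomes a fixed kwarg
            rx = PingTensorRxOp(self, name="rx")
            self.add_flow(tx, scale, {("out", "in")})
            self.add_flow(scale, rx, {("out", "in")})


    if __name__ == "__main__":
        MyApp().run()

The same decorator can also be applied to a class with a __call__ method, per the create_op docstring; in that case the class instance's __call__ is used as the operator function.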
holoscan-2.6.0.data/purelib/holoscan/executors/__init__.py
@@ -0,0 +1,26 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """This module provides a Python API for the C++ API Executor classes.
+
+ .. autosummary::
+
+     holoscan.executors.GXFExecutor
+ """
+
+ from ._executors import GXFExecutor
+
+ __all__ = [
+     "GXFExecutor",
+ ]
holoscan-2.6.0.data/purelib/holoscan/graphs/__init__.py
@@ -0,0 +1,32 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """This module provides a Python API for the C++ API Graph classes.
+
+ .. autosummary::
+
+     holoscan.graphs.FlowGraph
+     holoscan.graphs.FragmentFlowGraph
+     holoscan.graphs.OperatorFlowGraph
+ """
+
+ from ._graphs import FragmentFlowGraph, OperatorFlowGraph
+
+ FlowGraph = OperatorFlowGraph  # provide alias for backwards compatibility
+
+ __all__ = [
+     "FlowGraph",
+     "FragmentFlowGraph",
+     "OperatorFlowGraph",
+ ]
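
Example (not part of the package diff): a short sketch illustrating the backwards-compatibility alias defined in holoscan/graphs/__init__.py above; it only uses the names exported in that module's __all__.

    # Illustrative only; based on the graphs __init__ listed above.
    from holoscan.graphs import FlowGraph, OperatorFlowGraph

    # FlowGraph is kept as an alias of OperatorFlowGraph, so both names
    # refer to the same class object.
    assert FlowGraph is OperatorFlowGraph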