holoscan-cu13 3.7.0__cp310-cp310-manylinux_2_35_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124) hide show
  1. holoscan_cu13-3.7.0.data/purelib/holoscan/__init__.py +135 -0
  2. holoscan_cu13-3.7.0.data/purelib/holoscan/cli/__main__.py +26 -0
  3. holoscan_cu13-3.7.0.data/purelib/holoscan/conditions/__init__.py +75 -0
  4. holoscan_cu13-3.7.0.data/purelib/holoscan/conditions/_conditions.cpython-310-aarch64-linux-gnu.so +0 -0
  5. holoscan_cu13-3.7.0.data/purelib/holoscan/core/__init__.py +799 -0
  6. holoscan_cu13-3.7.0.data/purelib/holoscan/core/_core.cpython-310-aarch64-linux-gnu.so +0 -0
  7. holoscan_cu13-3.7.0.data/purelib/holoscan/data_loggers/__init__.py +79 -0
  8. holoscan_cu13-3.7.0.data/purelib/holoscan/data_loggers/async_console_logger/__init__.py +22 -0
  9. holoscan_cu13-3.7.0.data/purelib/holoscan/data_loggers/async_console_logger/_async_console_logger.cpython-310-aarch64-linux-gnu.so +0 -0
  10. holoscan_cu13-3.7.0.data/purelib/holoscan/data_loggers/basic_console_logger/__init__.py +22 -0
  11. holoscan_cu13-3.7.0.data/purelib/holoscan/data_loggers/basic_console_logger/_basic_console_logger.cpython-310-aarch64-linux-gnu.so +0 -0
  12. holoscan_cu13-3.7.0.data/purelib/holoscan/decorator.py +662 -0
  13. holoscan_cu13-3.7.0.data/purelib/holoscan/executors/__init__.py +26 -0
  14. holoscan_cu13-3.7.0.data/purelib/holoscan/executors/_executors.cpython-310-aarch64-linux-gnu.so +0 -0
  15. holoscan_cu13-3.7.0.data/purelib/holoscan/graphs/__init__.py +32 -0
  16. holoscan_cu13-3.7.0.data/purelib/holoscan/graphs/_graphs.cpython-310-aarch64-linux-gnu.so +0 -0
  17. holoscan_cu13-3.7.0.data/purelib/holoscan/gxf/__init__.py +60 -0
  18. holoscan_cu13-3.7.0.data/purelib/holoscan/gxf/_gxf.cpython-310-aarch64-linux-gnu.so +0 -0
  19. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/gxf_extensions/libgxf_holoscan_wrapper.so +0 -0
  20. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/gxf_extensions/libgxf_holoscan_wrapper_lib.so +0 -0
  21. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/gxf_extensions/libgxf_ucx_holoscan.so +0 -0
  22. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/gxf_extensions/libgxf_ucx_holoscan_lib.so +0 -0
  23. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libgxf_app.so +0 -0
  24. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libgxf_core.so +0 -0
  25. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libgxf_cuda.so +0 -0
  26. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libgxf_http.so +0 -0
  27. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libgxf_logger.so +0 -0
  28. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libgxf_multimedia.so +0 -0
  29. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libgxf_rmm.so +0 -0
  30. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libgxf_sample.so +0 -0
  31. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libgxf_serialization.so +0 -0
  32. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libgxf_std.so +0 -0
  33. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libgxf_ucx.so +0 -0
  34. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoinfer_utils.so.3.7.0 +0 -0
  35. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_core.so.3.7.0 +0 -0
  36. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_data_loggers_async_console_logger.so.3.7.0 +0 -0
  37. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_data_loggers_basic_console_logger.so.3.7.0 +0 -0
  38. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_infer.so.3.7.0 +0 -0
  39. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_infer_onnx_runtime.so.3.7.0 +0 -0
  40. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_infer_torch.so.3.7.0 +0 -0
  41. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_infer_utils.so.3.7.0 +0 -0
  42. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_logger.so.3.7.0 +0 -0
  43. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_async_ping_rx.so.3.7.0 +0 -0
  44. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_async_ping_tx.so.3.7.0 +0 -0
  45. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_bayer_demosaic.so.3.7.0 +0 -0
  46. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_format_converter.so.3.7.0 +0 -0
  47. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_gxf_codelet.so.3.7.0 +0 -0
  48. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_holoviz.so.3.7.0 +0 -0
  49. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_inference.so.3.7.0 +0 -0
  50. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_inference_processor.so.3.7.0 +0 -0
  51. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_ping_rx.so.3.7.0 +0 -0
  52. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_ping_tensor_rx.so.3.7.0 +0 -0
  53. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_ping_tensor_tx.so.3.7.0 +0 -0
  54. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_ping_tx.so.3.7.0 +0 -0
  55. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_segmentation_postprocessor.so.3.7.0 +0 -0
  56. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_test_ops.so.3.7.0 +0 -0
  57. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_v4l2.so.3.7.0 +0 -0
  58. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_video_stream_recorder.so.3.7.0 +0 -0
  59. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_op_video_stream_replayer.so.3.7.0 +0 -0
  60. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_pose_tree.so.3.7.0 +0 -0
  61. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_profiler.so.3.7.0 +0 -0
  62. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_spdlog_logger.so.3.7.0 +0 -0
  63. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libholoscan_viz.so.3.7.0 +0 -0
  64. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libucm.so.0.0.0 +0 -0
  65. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libucp.so.0.0.0 +0 -0
  66. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libucs.so.0.0.0 +0 -0
  67. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libucs_signal.so.0.0.0 +0 -0
  68. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libuct.so.0.0.0 +0 -0
  69. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/libucxx.so +0 -0
  70. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/ucx/libucm_cuda.so.0.0.0 +0 -0
  71. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/ucx/libuct_cma.so.0.0.0 +0 -0
  72. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/ucx/libuct_cuda.so.0.0.0 +0 -0
  73. holoscan_cu13-3.7.0.data/purelib/holoscan/lib/ucx/libucx_perftest_cuda.so.0.0.0 +0 -0
  74. holoscan_cu13-3.7.0.data/purelib/holoscan/logger/__init__.py +37 -0
  75. holoscan_cu13-3.7.0.data/purelib/holoscan/logger/_logger.cpython-310-aarch64-linux-gnu.so +0 -0
  76. holoscan_cu13-3.7.0.data/purelib/holoscan/network_contexts/__init__.py +28 -0
  77. holoscan_cu13-3.7.0.data/purelib/holoscan/network_contexts/_network_contexts.cpython-310-aarch64-linux-gnu.so +0 -0
  78. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/__init__.py +99 -0
  79. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/bayer_demosaic/__init__.py +24 -0
  80. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/bayer_demosaic/_bayer_demosaic.cpython-310-aarch64-linux-gnu.so +0 -0
  81. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/format_converter/__init__.py +23 -0
  82. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/format_converter/_format_converter.cpython-310-aarch64-linux-gnu.so +0 -0
  83. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/gxf_codelet/__init__.py +67 -0
  84. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/gxf_codelet/_gxf_codelet.cpython-310-aarch64-linux-gnu.so +0 -0
  85. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/holoviz/__init__.py +423 -0
  86. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/holoviz/_holoviz.cpython-310-aarch64-linux-gnu.so +0 -0
  87. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/inference/__init__.py +28 -0
  88. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/inference/_inference.cpython-310-aarch64-linux-gnu.so +0 -0
  89. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/inference_processor/__init__.py +23 -0
  90. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/inference_processor/_inference_processor.cpython-310-aarch64-linux-gnu.so +0 -0
  91. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/ping_rx/__init__.py +45 -0
  92. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/ping_tensor_rx/__init__.py +22 -0
  93. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/ping_tensor_rx/_ping_tensor_rx.cpython-310-aarch64-linux-gnu.so +0 -0
  94. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/ping_tensor_tx/__init__.py +22 -0
  95. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/ping_tensor_tx/_ping_tensor_tx.cpython-310-aarch64-linux-gnu.so +0 -0
  96. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/ping_tx/__init__.py +46 -0
  97. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/segmentation_postprocessor/__init__.py +23 -0
  98. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/segmentation_postprocessor/_segmentation_postprocessor.cpython-310-aarch64-linux-gnu.so +0 -0
  99. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/test_ops/__init__.py +22 -0
  100. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/test_ops/_test_ops.cpython-310-aarch64-linux-gnu.so +0 -0
  101. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/v4l2_video_capture/__init__.py +23 -0
  102. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/v4l2_video_capture/_v4l2_video_capture.cpython-310-aarch64-linux-gnu.so +0 -0
  103. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/video_stream_recorder/__init__.py +22 -0
  104. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/video_stream_recorder/_video_stream_recorder.cpython-310-aarch64-linux-gnu.so +0 -0
  105. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/video_stream_replayer/__init__.py +22 -0
  106. holoscan_cu13-3.7.0.data/purelib/holoscan/operators/video_stream_replayer/_video_stream_replayer.cpython-310-aarch64-linux-gnu.so +0 -0
  107. holoscan_cu13-3.7.0.data/purelib/holoscan/pose_tree/__init__.py +271 -0
  108. holoscan_cu13-3.7.0.data/purelib/holoscan/pose_tree/_pose_tree.cpython-310-aarch64-linux-gnu.so +0 -0
  109. holoscan_cu13-3.7.0.data/purelib/holoscan/resources/__init__.py +171 -0
  110. holoscan_cu13-3.7.0.data/purelib/holoscan/resources/_resources.cpython-310-aarch64-linux-gnu.so +0 -0
  111. holoscan_cu13-3.7.0.data/purelib/holoscan/schedulers/__init__.py +32 -0
  112. holoscan_cu13-3.7.0.data/purelib/holoscan/schedulers/_schedulers.cpython-310-aarch64-linux-gnu.so +0 -0
  113. holoscan_cu13-3.7.0.data/purelib/holoscan_cu13-3.7.0.pth +1 -0
  114. holoscan_cu13-3.7.0.dist-info/LICENSE.txt +202 -0
  115. holoscan_cu13-3.7.0.dist-info/METADATA +148 -0
  116. holoscan_cu13-3.7.0.dist-info/NOTICE.txt +191 -0
  117. holoscan_cu13-3.7.0.dist-info/NVIDIA-AI-PRODUCT-EULA.txt +243 -0
  118. holoscan_cu13-3.7.0.dist-info/README.md +35 -0
  119. holoscan_cu13-3.7.0.dist-info/RECORD +124 -0
  120. holoscan_cu13-3.7.0.dist-info/WHEEL +5 -0
  121. holoscan_cu13-3.7.0.dist-info/axle.lck +0 -0
  122. holoscan_cu13-3.7.0.dist-info/entry_points.txt +3 -0
  123. holoscan_cu13-3.7.0.dist-info/symlinks.txt +78 -0
  124. holoscan_cu13-3.7.0.dist-info/top_level.txt +2 -0
@@ -0,0 +1,662 @@
1
+ """
2
+ SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ SPDX-License-Identifier: Apache-2.0
4
+
5
+ Licensed under the Apache License, Version 2.0 (the "License");
6
+ you may not use this file except in compliance with the License.
7
+ You may obtain a copy of the License at
8
+
9
+ http://www.apache.org/licenses/LICENSE-2.0
10
+
11
+ Unless required by applicable law or agreed to in writing, software
12
+ distributed under the License is distributed on an "AS IS" BASIS,
13
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ See the License for the specific language governing permissions and
15
+ limitations under the License.
16
+ """ # noqa: E501
17
+
18
+ """This module provides a decorator API for creating Python Operators
19
+
20
+ .. autosummary::
21
+
22
+ holoscan.decorator.create_op
23
+ holoscan.decorator.Input
24
+ holoscan.decorator.Output
25
+ """
26
+
27
+ import ast
28
+ import inspect
29
+ import textwrap
30
+ from collections.abc import Callable, Sequence
31
+ from dataclasses import dataclass, field
32
+ from typing import Any
33
+
34
+ import cupy as cp
35
+ import numpy as np
36
+
37
+ from holoscan.conditions import BooleanCondition
38
+ from holoscan.core import (
39
+ ConditionType,
40
+ IOSpec,
41
+ Operator,
42
+ OperatorSpec,
43
+ )
44
+ from holoscan.core import _Condition as ConditionBase
45
+ from holoscan.core import _Resource as ResourceBase
46
+ from holoscan.core._core import Fragment as FragmentBase
47
+ from holoscan.core._core import Tensor as TensorBase
48
+
49
+ __all__ = ["Input", "Output", "create_op"]
50
+
51
+
52
+ def _is_tensor_like(obj):
53
+ return (
54
+ (hasattr(obj, "__dlpack__") and hasattr(obj, "__dlpack_device__"))
55
+ or hasattr(obj, "__cuda_array_interface__")
56
+ or hasattr(obj, "__array_interface__")
57
+ )
58
+
59
+
60
+ def _as_python_tensor(tensor):
61
+ if hasattr(tensor, "__array_interface__") or (
62
+ hasattr(tensor, "__dlpack_device__") and tensor.__dlpack_device__()[0] == 1
63
+ ):
64
+ return np.asarray(tensor)
65
+ else:
66
+ return cp.asarray(tensor)
67
+
68
+
69
@dataclass
class Input:
    """Class for specifying an input port and how the received value maps to a function's arguments.

    Parameters
    ----------
    name : str
        The name of the input port.
    arg_map: str or dict[str, str]
        If `arg_map` is a str, the Python object received by the input port is passed to the
        function argument specified by `arg_map`. If `arg_map` is a dict, the input is assumed to be
        a TensorMap (dictionary of tensors). In this case the keys of the dict are the tensor names
        and the values are the names of the function arguments that the tensors map to.
    size: int | holoscan.core.IOSpec.IOSize, optional
        The size of the queue for the input port.
        By default, `IOSpec.SIZE_ONE` (== `IOSpec.IOSize(1)`) is used.
        If `IOSpec.ANY_SIZE` is used, it defines multiple receivers internally for the input port.
        Otherwise, the size of the input queue is set to the specified value, and the message
        available condition for the input port is set with `min_size` equal to the same value.

        The following size constants are supported:
        - ``IOSpec.ANY_SIZE``: Any size.
        - ``IOSpec.PRECEDING_COUNT``: Number of preceding connections.
        - ``IOSpec.SIZE_ONE``: The queue size is 1.

        Please refer to the [Holoscan SDK User Guide](https://docs.nvidia.com/holoscan/sdk-user-guide/holoscan_create_operator.html#receiving-any-number-of-inputs-python)
        to see how to receive any number of inputs in Python.
    policy : holoscan.core.IOSpec.QueuePolicy, optional
        The queue policy for the input port.
        The queue policy to set. Valid values are:

        - QueuePolicy.POP : If the queue is full, pop the oldest item, then add the new one.
        - QueuePolicy.REJECT : If the queue is full, reject (discard) the new item.
        - QueuePolicy.FAULT : If the queue is full, log a warning and reject the new item.
    condition_type : holoscan.core.ConditionType, optional
        The condition type for the input port.
    condition_kwargs : dict[str, Any], optional
        The keywords passed onto the condition specified by `condition_type`.
    connector_type : holoscan.core.IOSpec.ConnectorType, optional
        The connector type for the input port.
    connector_kwargs : dict[str, Any], optional
        The keywords passed onto the connector specified by `connector_type`.
    """

    name: str
    # NOTE(review): default is an empty tuple even though the annotation says
    # str | dict | None; kept as-is for backward compatibility (an empty tuple
    # maps no arguments, same as None in downstream isinstance checks).
    arg_map: str | dict[str, str] | None = ()
    size: int | IOSpec.IOSize = IOSpec.SIZE_ONE
    policy: IOSpec.QueuePolicy | None = None
    condition_type: ConditionType | None = None
    condition_kwargs: dict[str, Any] = field(default_factory=dict)
    connector_type: IOSpec.ConnectorType | None = None
    connector_kwargs: dict[str, Any] = field(default_factory=dict)

    def create_input(self, spec: OperatorSpec) -> IOSpec:
        """Add this input port to `spec` and return the created `IOSpec`.

        Raises
        ------
        ValueError
            If `size` is neither an int nor an `IOSpec.IOSize`, or if `policy`
            is provided but is not an `IOSpec.QueuePolicy`.
        """
        # Normalize a plain int size to an IOSize (mutates self so repeated
        # calls see the normalized value).
        if isinstance(self.size, int):
            self.size = IOSpec.IOSize(self.size)
        elif not isinstance(self.size, IOSpec.IOSize):
            raise ValueError(f"Invalid size: {self.size}")

        if self.policy is not None and not isinstance(self.policy, IOSpec.QueuePolicy):
            raise ValueError(f"Invalid policy: {self.policy}")

        iospec = spec.input(self.name, size=self.size, policy=self.policy)

        if self.condition_type is not None:
            iospec = iospec.condition(self.condition_type, **self.condition_kwargs)
        if self.connector_type is not None:
            iospec = iospec.connector(self.connector_type, **self.connector_kwargs)
        # Bug fix: the method is annotated to return IOSpec but previously fell
        # off the end and implicitly returned None.
        return iospec
137
+
138
+
139
@dataclass
class Output:
    """Class for specifying an output port and how one or more of a functions returned value(s) map
    to it.

    Parameters
    ----------
    name : str
        The name of the output port.
    tensor_names: str, tuple(str) or None
        If None, whatever Python object the func outputs is emitted on the output port. If a tuple
        of strings is provided it is assumed that the func returns a dictionary of tensors. The
        names in the tuple specify which tensors in the dict will be transmitted on the output
        port. There is no need to specify `tensor_names` if all tensors in a dict returned by the
        function are to be transmitted. In the case of a single tensor name, a string can be
        provided instead of a tuple.
    size: int | holoscan.core.IOSpec.IOSize, optional
        The size of the queue for the output port.
        By default, `IOSpec.SIZE_ONE` (== `IOSpec.IOSize(1)`) is used.
    policy : holoscan.core.IOSpec.QueuePolicy, optional
        The queue policy for the output port.
        The queue policy to set. Valid values are:

        - QueuePolicy.POP : If the queue is full, pop the oldest item, then add the new one.
        - QueuePolicy.REJECT : If the queue is full, reject (discard) the new item.
        - QueuePolicy.FAULT : If the queue is full, log a warning and reject the new item.
    condition_type : holoscan.core.ConditionType, optional
        The condition type for the output port.
    condition_kwargs : dict[str, Any], optional
        The keywords passed onto the condition specified by `condition_type`.
    connector_type : holoscan.core.IOSpec.ConnectorType, optional
        The connector type for the output port.
    connector_kwargs : dict[str, Any], optional
        The keywords passed onto the connector specified by `connector_type`.
    """

    name: str
    # Annotation fix: a variable-length tuple of names is tuple[str, ...]
    # (tuple[str] means exactly one element). Default () means "all tensors".
    tensor_names: str | tuple[str, ...] | None = ()
    size: int | IOSpec.IOSize = IOSpec.SIZE_ONE
    policy: IOSpec.QueuePolicy | None = None
    condition_type: ConditionType | None = None
    condition_kwargs: dict[str, Any] = field(default_factory=dict)
    connector_type: IOSpec.ConnectorType | None = None
    connector_kwargs: dict[str, Any] = field(default_factory=dict)

    def create_output(self, spec: OperatorSpec) -> IOSpec:
        """Add this output port to `spec` and return the created `IOSpec`.

        Raises
        ------
        ValueError
            If `size` is neither an int nor an `IOSpec.IOSize`, or if `policy`
            is provided but is not an `IOSpec.QueuePolicy`.
        """
        # Normalize a plain int size to an IOSize (mutates self so repeated
        # calls see the normalized value).
        if isinstance(self.size, int):
            self.size = IOSpec.IOSize(self.size)
        elif not isinstance(self.size, IOSpec.IOSize):
            raise ValueError(f"Invalid size: {self.size}")

        if self.policy is not None and not isinstance(self.policy, IOSpec.QueuePolicy):
            raise ValueError(f"Invalid policy: {self.policy}")

        iospec = spec.output(self.name, size=self.size, policy=self.policy)
        if self.condition_type is not None:
            iospec = iospec.condition(self.condition_type, **self.condition_kwargs)
        if self.connector_type is not None:
            iospec = iospec.connector(self.connector_type, **self.connector_kwargs)
        # Bug fix: the method is annotated to return IOSpec but previously fell
        # off the end and implicitly returned None.
        return iospec
198
+
199
+
200
def _as_input(input_: str | Input) -> Input:
    """Coerce a port name (str) into an `Input` object.

    A plain string becomes an `Input` whose port name and argument mapping are
    both that string; an existing `Input` is passed through unchanged.

    Raises
    ------
    ValueError
        If `input_` is neither a str nor an `Input`.
    """
    if isinstance(input_, str):
        return Input(input_, arg_map=input_)
    if not isinstance(input_, Input):
        # Bug fix: the ValueError was previously *returned* instead of raised,
        # silently passing the exception object through as if it were an Input.
        raise ValueError(
            "`inputs` must be a single port name or Input object or a tuple of these"
        )
    return input_
207
+
208
+
209
def _as_output(output: str | Output) -> Output:
    """Coerce a port name (str) into an `Output` object.

    A plain string becomes an `Output` with that port name; an existing
    `Output` is passed through unchanged.

    Raises
    ------
    ValueError
        If `output` is neither a str nor an `Output`.
    """
    if isinstance(output, str):
        return Output(output)
    if not isinstance(output, Output):
        # Bug fix: the ValueError was previously *returned* instead of raised,
        # silently passing the exception object through as if it were an Output.
        raise ValueError(
            "`outputs` must be a single port name or Output object or a tuple of these"
        )
    return output
218
+
219
+
220
+ def _has_function_returns_value(func):
221
+ """Check if the provided function has any return statements returning a value."""
222
+
223
+ class ReturnVisitor(ast.NodeVisitor):
224
+ def __init__(self):
225
+ self.returns_value = False
226
+
227
+ def visit_Return(self, node): # noqa: N802
228
+ # check if the return statement has a value
229
+ if node.value is not None:
230
+ self.returns_value = True
231
+ return
232
+
233
+ self.generic_visit(node)
234
+
235
+ def visit_ClassDef(self, node): # noqa: N802, ARG002
236
+ return
237
+
238
+ def visit_FunctionDef(self, node): # noqa: N802, ARG002
239
+ return
240
+
241
+ def visit_AsyncFunctionDef(self, node): # noqa: N802, ARG002
242
+ return
243
+
244
+ def visit(self, node):
245
+ if self.returns_value:
246
+ return
247
+ super().visit(node)
248
+
249
+ # parse the source code into an AST
250
+ source_code = inspect.getsource(func)
251
+ # deindent the text if it is indented
252
+ source_code = textwrap.dedent(source_code)
253
+ tree = ast.parse(source_code)
254
+ # initialize the visitor
255
+ visitor = ReturnVisitor()
256
+ # walk the AST
257
+ for node in ast.walk(tree):
258
+ if isinstance(node, ast.FunctionDef) and node.name == func.__name__:
259
+ visitor.generic_visit(node)
260
+ break
261
+ return visitor.returns_value
262
+
263
+
264
+ def create_op(
265
+ function_or_class: type | Callable[..., Any] | None = None,
266
+ inputs: str | Input | Sequence[str | Input] = (),
267
+ outputs: str | Output | Sequence[str | Output] = (),
268
+ cast_tensors: bool = True,
269
+ op_param: str | None = None,
270
+ ) -> Callable:
271
+ """Decorator for creating an operator from a function or a class.
272
+
273
+ When the decorator is used on a class, the class must have a `__call__` method that will be
274
+ used as the operator function.
275
+
276
+ inputs : str, Input, or Tuple[str | Input], optional
277
+ If a str is provided, it is assumed to be the name of the input port and that the function
278
+ has a variable matching that port name to which the object received on the port will be
279
+ connected. If the port name does not match the name of the variable in the function
280
+ signature, or if there are multiple tensors to be mapped to multiple objects, use an Input
281
+ argument. A tuple of str or Input objects can be provided to specify multiple input ports.
282
+ The default of an empty tuple corresponds to no input ports.
283
+ outputs : str, Output, or Tuple[str | Output], optional
284
+ If a str is provided, any value returned by the function will be emitted on an output port
285
+ of that name. If a tuple of multiple str is provided and the function returns a tuple, then
286
+ the tuple elements will be emitted from each output port in the order at which they are
287
+ defined. In this case, the number of output ports should match the length of the output
288
+ tuple. Finally, an Output object can be provided in the case that the function returns a
289
+ dictionary of output arrays that should be split across multiple ports.
290
+ cast_tensors : bool, optional
291
+ If True, automatically cast any tensor-like input to a NumPy or CuPy array (for host and
292
+ device tensors, respectively). If set to False, these will be left as `holoscan.Tensor` and
293
+ the user will have to cast to the desired type within the body of the decorated function or
294
+ class.
295
+ op_param : str, optional
296
+ If provided, adds this parameter name to the function signature which will
297
+ contain a reference to the operator instance. This allows the function to
298
+ access operator methods and attributes.
299
+
300
+ Notes
301
+ -----
302
+ Another case where using `Input` or `Output` objects is necessary is if the user wishes to
303
+ override the default connector or condition types for the port.
304
+ """
305
+ # used to store the class object if the decorator is used on a class
306
+ class_obj = None
307
+ # used to determine if the decorator was used without args
308
+ is_without_args = function_or_class is not None
309
+
310
+ # convert scalars to tuple
311
+ if isinstance(inputs, str | Input):
312
+ inputs = (inputs,)
313
+ # convert any str in the tuple to an Input object
314
+ inputs = tuple(_as_input(i) for i in inputs)
315
+
316
+ if isinstance(outputs, str | Output):
317
+ outputs = (outputs,)
318
+ # convert any str in the tuple to an Output object
319
+ outputs = tuple(_as_output(o) for o in outputs)
320
+
321
+ if not isinstance(outputs, tuple):
322
+ raise ValueError(
323
+ "`outputs` must be a single port name or Output object or a tuple of these"
324
+ )
325
+
326
+ if op_param is not None and not isinstance(op_param, str):
327
+ raise TypeError(f"op_param must be a string or None, got {type(op_param)}")
328
+
329
+ def decorator(func_or_cls):
330
+ nonlocal function_or_class, class_obj
331
+
332
+ def make_class(*args, **kwargs):
333
+ if "fragment" in kwargs:
334
+ fragment = kwargs.pop("fragment")
335
+ elif args and isinstance(args[0], FragmentBase):
336
+ fragment, args = args[0], args[1:]
337
+ else:
338
+ raise ValueError(
339
+ "fragment must be provided via kwarg or as the first positional argument"
340
+ )
341
+
342
+ # frame = inspect.currentframe()
343
+ # args_names, _, _, locals_dict = inspect.getargvalues(frame)
344
+ # print(f"{args_names=}, {locals_dict=}")
345
+
346
+ class DynamicOp(Operator):
347
+ def __init__(
348
+ self,
349
+ fragment: FragmentBase,
350
+ *args,
351
+ inputs,
352
+ outputs,
353
+ cast_tensors=cast_tensors,
354
+ op_param=op_param,
355
+ **kwargs,
356
+ ):
357
+ self.func = func_or_cls
358
+ self.input_objs = inputs
359
+ self.output_objs = outputs
360
+ self.is_generator = inspect.isgeneratorfunction(self.func)
361
+ self.gen_obj = None
362
+ self.cast_tensors = cast_tensors
363
+ self.op_param = op_param
364
+
365
+ # remove conditions and resources from *args
366
+ condition_args = tuple(a for a in args if isinstance(a, ConditionBase))
367
+ resource_args = tuple(a for a in args if isinstance(a, ResourceBase))
368
+ args = tuple(a for a in args if not isinstance(a, ConditionBase | ResourceBase))
369
+ self.func_args = args
370
+
371
+ # add a boolean condition to prevent triggering if the function is a generator
372
+ # and the iteration is complete
373
+ if self.is_generator:
374
+ condition_args = condition_args + (
375
+ BooleanCondition(fragment, name="_generator_func"),
376
+ )
377
+
378
+ # set name kwarg to self.func.__name__ if not provided
379
+ name = kwargs.pop("name", self.func.__name__)
380
+
381
+ argspec = inspect.getfullargspec(self.func)
382
+
383
+ # remove self from argspec.args if the decorator is used on a class
384
+ if class_obj:
385
+ argspec = argspec._replace(args=argspec.args[1:])
386
+ self.class_obj = class_obj
387
+
388
+ self.func_argspec = argspec
389
+
390
+ # populate inputs and outputs with defaults if decorator was used without args
391
+ if is_without_args:
392
+ self.input_objs = tuple(Input(name, arg_map=name) for name in argspec.args)
393
+ # configure the output port if the function contains return statements
394
+ # (in this case, the port name will be left empty)
395
+ if _has_function_returns_value(function_or_class):
396
+ self.output_objs = tuple((Output(""),))
397
+
398
+ # populate all arguments not provided with defaults
399
+ if argspec.kwonlydefaults is not None:
400
+ for k in argspec.kwonlyargs:
401
+ if k not in kwargs and k in argspec.kwonlydefaults:
402
+ kwargs[k] = argspec.kwonlydefaults[k]
403
+
404
+ # store a list of what ports map to what function arguments
405
+ self.input_mappings = {}
406
+ for input_obj in self.input_objs:
407
+ # store what argument(s) this input maps to
408
+ self.input_mappings[input_obj.name] = input_obj.arg_map
409
+
410
+ # sets self.dynamic_kwargs and self.fixed_kwargs
411
+ self._set_fixed_and_dynamic_kwargs(kwargs)
412
+
413
+ # get the type annotations dict for the function (not currently used)
414
+ # self.func_annotations = inspect.get_annotations(self.func)
415
+ self.func_annotations = self.func.__annotations__
416
+
417
+ super().__init__(fragment, *condition_args, *resource_args, name=name)
418
+
419
+ def _set_fixed_and_dynamic_kwargs(self, kwargs):
420
+ """Split provided kwargs into those which are "fixed" and those which are
421
+ "dynamic".
422
+
423
+ Here "dynamic" refers to function arguments that are obtained from input
424
+ ports. The keys for self.dynamic_kwargs are determined here, but the values
425
+ are initialized to None. Actual values get set during each `compute` call.
426
+
427
+ "fixed" refers to other keyword arguments to the function that don't change
428
+ across calls.
429
+ """
430
+ self.dynamic_kwargs = {}
431
+ for input_map in self.input_mappings.values():
432
+ if isinstance(input_map, str):
433
+ self._add_dynamic_arg(input_map, kwargs)
434
+ elif isinstance(input_map, dict):
435
+ for arg_name in input_map.values():
436
+ self._add_dynamic_arg(arg_name, kwargs)
437
+ self.fixed_kwargs = kwargs
438
+
439
+ # add the operator instance to the kwargs if op_param was specified
440
+ if self.op_param:
441
+ self.fixed_kwargs[self.op_param] = self
442
+
443
+ # store any positional args with specified defaults in fixed_kwargs instead
444
+ argspec = self.func_argspec
445
+ if argspec.defaults is not None:
446
+ n_default_positional = len(argspec.defaults)
447
+ if n_default_positional > 0:
448
+ self.func_args = self.func_args[:-n_default_positional]
449
+ n_required_positional = len(argspec.args) - len(argspec.defaults)
450
+ for k, v in zip(
451
+ argspec.args[n_required_positional:], argspec.defaults, strict=False
452
+ ):
453
+ # don't overwrite any kwargs that were provided
454
+ if k not in self.fixed_kwargs:
455
+ self.fixed_kwargs[k] = v
456
+
457
+ # Now that all args with defaults are in self.fixed_kwargs we can check if any
458
+ # of the required arguments were not specified
459
+ required_args = set(argspec.args) | set(argspec.kwonlyargs)
460
+ if argspec.kwonlydefaults is not None:
461
+ required_args -= set(argspec.kwonlydefaults.keys())
462
+ for arg in required_args:
463
+ if arg not in self.fixed_kwargs and arg not in self.dynamic_kwargs:
464
+ raise ValueError(f"required argument, '{arg}', has not been specified")
465
+
466
+ def _add_dynamic_arg(self, arg_name, kwargs):
467
+ """helper function for _set_fixed_and_dynamic_kwargs"""
468
+ if arg_name in self.dynamic_kwargs:
469
+ raise ValueError(
470
+ "duplicate specification of mapping to function kwarg: '{arg_name}'"
471
+ )
472
+ self.dynamic_kwargs[arg_name] = None
473
+ try:
474
+ kwargs.pop(arg_name)
475
+ except KeyError as e:
476
+ argspec = self.func_argspec
477
+ if arg_name not in argspec.kwonlyargs + argspec.args:
478
+ msg = (
479
+ f"Provided func does not have an arg or kwarg named '{arg_name}'."
480
+ " The provided wrapped function has"
481
+ f" positional args: {argspec.args}"
482
+ f" and keyword-only args: {argspec.kwonlyargs}"
483
+ )
484
+ raise KeyError(msg) from e
485
+
486
+ # # not used by the Application, but can be useful to test the call
487
+ # def __call__(self, *args, **kwargs):
488
+ # print(f"{self.msg=}")
489
+ # return self.func(*self.func_args, *args, **self.fixed_kwargs, **kwargs)
490
+
491
def setup(self, spec: OperatorSpec):
    """Register the operator's input/output ports and build the output map."""
    # Declare every configured input port on the spec.
    for in_obj in self.input_objs:
        in_obj.create_input(spec)

    # Declare the output ports. A bare string of tensor names is normalized
    # to a one-element tuple, and output_tensor_map records, per port name,
    # the tuple of tensor names that port emits (consumed by compute).
    self.output_tensor_map = {}
    for out_obj in self.output_objs:
        out_obj.create_output(spec)
        names = out_obj.tensor_names
        if isinstance(names, str):
            names = (names,)
            out_obj.tensor_names = names
        self.output_tensor_map[out_obj.name] = tuple(names)
501
+
502
def compute(self, op_input, op_output, context):
    """Receive inputs, call the wrapped function, and emit its outputs.

    Flow:
      1. For each entry in ``self.input_mappings``, receive a message from
         the corresponding input port and store values into
         ``self.dynamic_kwargs``. A ``str`` mapping assigns the whole
         message (or a single tensor extracted from a dict message) to one
         kwarg; a ``dict`` mapping assigns named entries of a dict message
         to individual kwargs.
      2. Call ``self.func`` with the fixed and dynamic kwargs. If the
         wrapped function is a generator function (``self.is_generator``),
         the generator is created on first call and advanced once per
         ``compute``; exhaustion disables further scheduling.
      3. Emit the result on each output port according to
         ``self.output_tensor_map`` (built in ``setup``).
    """
    for port_name, arg_map in self.input_mappings.items():
        # print(f"input {port_name=}, {arg_map=}")
        msg = op_input.receive(port_name)
        if isinstance(arg_map, str):
            # print(f"{msg=}")
            if isinstance(msg, dict):
                try:
                    # try tensor based on matching name
                    msg = msg[arg_map]
                except KeyError as e:
                    # use tensor regardless of name if only one is present
                    tensors = tuple(
                        v for k, v in msg.items() if isinstance(v, TensorBase)
                    )
                    if len(tensors) == 1:
                        msg = tensors[0]
                    elif len(tensors) > 1:
                        raise ValueError(
                            "More than one tensor found in port, but none has "
                            f"name {arg_map}"
                        ) from e
                    # NOTE: if the dict contains no tensors at all, the dict
                    # itself is passed through unchanged as the kwarg value.

            # cast holoscan.Tensor to cp.asarray(Tensor) here or require the user
            # to do it in the provided func?
            if self.cast_tensors and isinstance(msg, TensorBase):
                msg = _as_python_tensor(msg)

            self.dynamic_kwargs[arg_map] = msg
        elif isinstance(arg_map, dict):
            # dict mapping: each (tensor_name -> arg_name) pair pulls one
            # entry out of the received dict message.
            for tensor_name, arg_name in arg_map.items():
                try:
                    val = msg[tensor_name]
                except KeyError as e:
                    raise KeyError(
                        f"key with name '{tensor_name}' not found in input dict"
                    ) from e
                if self.cast_tensors and isinstance(val, TensorBase):
                    val = _as_python_tensor(val)
                self.dynamic_kwargs[arg_name] = val

    if self.is_generator:
        # First compute call: create the generator object; subsequent calls
        # only advance it.
        if self.gen_obj is None:
            out = self.func(
                *self.func_args, **self.fixed_kwargs, **self.dynamic_kwargs
            )
            self.gen_obj = out
        try:
            out = next(self.gen_obj)
        except StopIteration:
            # disable the condition to prevent further calls
            self.conditions["_generator_func"].disable_tick()
            return
    else:
        out = self.func(*self.func_args, **self.fixed_kwargs, **self.dynamic_kwargs)

    # if the output is a tuple and there is >1 port, we distribute the outputs
    if isinstance(out, tuple) and (len(self.output_tensor_map) > 1):
        # for tuple case, each port should correspond to each output tuple element
        if any([len(names) > 1 for names in self.output_tensor_map.values()]):
            raise ValueError(
                "The function output was found to be a tuple type, but each "
                "output tuple element must have its own port. In other words, "
                "the `outputs` argument of `create_op` should be a tuple of port "
                "names equal in length to the returned tuple."
            )
        # Check that the output tuple length and number of ports match.
        if len(out) != len(self.output_tensor_map):
            raise ValueError(
                f"The number of output tuple elements and number of tensors must "
                f"match.\n"
                f"Output tuple length = {len(out)}\n"
                f"Number of output tensors = {len(self.output_tensor_map)}"
            )
        # strict=False is safe here: the lengths were just checked above.
        for (port_name, tensor_names), out_element in zip(
            self.output_tensor_map.items(), out, strict=False
        ):
            if _is_tensor_like(out_element):
                # Wrap bare tensor-like values in a dict keyed by the port's
                # (first) tensor name, or "" when no name was configured.
                name = "" if len(tensor_names) == 0 else tensor_names[0]
                out_element = {name: out_element}
            op_output.emit(out_element, port_name)
        return

    # Non-tuple (or single-port) case: the same `out` object is emitted on
    # every output port, shaped per that port's configured tensor names.
    for port_name, tensor_names in self.output_tensor_map.items():
        if tensor_names is None or len(tensor_names) == 0:
            if _is_tensor_like(out):
                # emit as dict of tensor-like objects
                out = {"": out}
            op_output.emit(out, port_name)
        elif len(tensor_names) == 1:
            name = tensor_names[0]
            if _is_tensor_like(out):
                # emit as dict of tensor-like objects
                out = {name: out}
                op_output.emit(out, port_name)
            else:
                # `out` is assumed to be a mapping containing `name`
                if name not in out:
                    raise ValueError(
                        f"tensor with name '{name}' not found in function output"
                    )
                op_output.emit({name: out[name]}, port_name)
        else:
            # Multiple tensor names on one port: select the named subset of
            # the function's output mapping and emit it as one dict.
            out_tensors = {}
            for name in tensor_names:
                if name not in out:
                    raise ValueError(
                        f"tensor with name '{name}' not found in function output"
                    )
                out_tensors[name] = out[name]
            # print(f"outputting tensors named: {tuple(out_tensors.keys())} on
            # port {port_name}")
            # print(f"tensormap emit of {out_tensors=}")
            op_output.emit(out_tensors, port_name)
615
+
616
+ op = DynamicOp(
617
+ fragment, *args, inputs=inputs, outputs=outputs, op_param=op_param, **kwargs
618
+ )
619
+
620
+ def _to_camel_case(name):
621
+ """Convert name to camel case"""
622
+ parts = name.split("_")
623
+ return "".join(p.capitalize() for p in parts)
624
+
625
+ # manually update instead of using functools.update_wrapper(op, func_or_cls) because:
626
+ # - don't want to overwrite __doc__ with func.__doc__
627
+ # - want to use name instead of func.__name__
628
+ if class_obj:
629
+ class_name = class_obj.__class__.__name__
630
+ op.__name__ = class_name + "Op" if not class_name.endswith("Op") else class_name
631
+ else:
632
+ op.__name__ = _to_camel_case(func_or_cls.__name__) + "Op"
633
+ op.__qualname__ = op.__name__
634
+ op.__module__ = func_or_cls.__module__
635
+ return op
636
+
637
def init_class(*args, **kwargs):
    """Instantiate the decorated class and wrap its ``__call__`` as the op function.

    Receives the constructor arguments of the decorated class, builds an
    instance, and applies ``decorator`` to the instance's ``__call__``
    (returning whatever ``decorator`` produces).

    Raises
    ------
    ValueError
        If the created instance is not callable (no ``__call__`` method).
    """
    nonlocal class_obj, function_or_class
    # create an instance of the class (using function_or_class as the class)
    class_obj = function_or_class(*args, **kwargs)
    # use the class's __call__ method as the operator function
    if not callable(class_obj):
        raise ValueError(
            f"{function_or_class} must have a __call__ method to be used as an operator"
        )
    function_or_class = class_obj.__call__
    return decorator(function_or_class)
648
+
649
+ if func_or_cls is None:
650
+ return decorator
651
+
652
+ # check if the decorator was used on a class first
653
+ if inspect.isclass(func_or_cls): # if isinstance(func_or_cls, type):
654
+ function_or_class = func_or_cls
655
+ return init_class
656
+
657
+ if callable(func_or_cls):
658
+ return make_class
659
+
660
+ raise Exception(f"Invalid usage of decorator for {func_or_cls}")
661
+
662
+ return decorator(function_or_class)
@@ -0,0 +1,26 @@
1
+ # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
"""This module provides a Python API for the C++ API Executor classes.

.. autosummary::

    holoscan.executors.GXFExecutor
"""

# GXFExecutor is implemented in the compiled extension module
# (_executors.cpython-*.so) shipped alongside this package.
from ._executors import GXFExecutor

# Explicit public API of this subpackage.
__all__ = [
    "GXFExecutor",
]