luminarycloud 0.22.0__py3-none-any.whl → 0.22.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- luminarycloud/_client/authentication_plugin.py +49 -0
- luminarycloud/_client/client.py +38 -11
- luminarycloud/_client/http_client.py +1 -1
- luminarycloud/_client/retry_interceptor.py +64 -2
- luminarycloud/_helpers/__init__.py +9 -0
- luminarycloud/_helpers/_inference_jobs.py +227 -0
- luminarycloud/_helpers/_parse_iso_datetime.py +54 -0
- luminarycloud/_helpers/download.py +11 -0
- luminarycloud/_helpers/proto_decorator.py +38 -7
- luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2.py +152 -132
- luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2.pyi +66 -8
- luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2_grpc.py +34 -0
- luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2_grpc.pyi +12 -0
- luminarycloud/_proto/api/v0/luminarycloud/physics_ai/physics_ai_pb2.py +142 -39
- luminarycloud/_proto/api/v0/luminarycloud/physics_ai/physics_ai_pb2.pyi +300 -3
- luminarycloud/_proto/api/v0/luminarycloud/physics_ai/physics_ai_pb2_grpc.py +34 -0
- luminarycloud/_proto/api/v0/luminarycloud/physics_ai/physics_ai_pb2_grpc.pyi +12 -0
- luminarycloud/_proto/api/v0/luminarycloud/physicsaiinference/physicsaiinference_pb2.py +255 -0
- luminarycloud/_proto/api/v0/luminarycloud/physicsaiinference/physicsaiinference_pb2.pyi +466 -0
- luminarycloud/_proto/api/v0/luminarycloud/physicsaiinference/physicsaiinference_pb2_grpc.py +242 -0
- luminarycloud/_proto/api/v0/luminarycloud/physicsaiinference/physicsaiinference_pb2_grpc.pyi +95 -0
- luminarycloud/_proto/api/v0/luminarycloud/simulation/simulation_pb2.py +29 -7
- luminarycloud/_proto/api/v0/luminarycloud/simulation/simulation_pb2.pyi +39 -0
- luminarycloud/_proto/api/v0/luminarycloud/simulation/simulation_pb2_grpc.py +36 -0
- luminarycloud/_proto/api/v0/luminarycloud/simulation/simulation_pb2_grpc.pyi +18 -0
- luminarycloud/_proto/api/v0/luminarycloud/thirdpartyintegration/onshape/onshape_pb2.py +88 -65
- luminarycloud/_proto/api/v0/luminarycloud/thirdpartyintegration/onshape/onshape_pb2.pyi +42 -0
- luminarycloud/_proto/api/v0/luminarycloud/thirdpartyintegration/onshape/onshape_pb2_grpc.py +34 -0
- luminarycloud/_proto/api/v0/luminarycloud/thirdpartyintegration/onshape/onshape_pb2_grpc.pyi +12 -0
- luminarycloud/_proto/api/v0/luminarycloud/vis/vis_pb2.py +163 -153
- luminarycloud/_proto/api/v0/luminarycloud/vis/vis_pb2.pyi +37 -3
- luminarycloud/_proto/base/base_pb2.py +7 -6
- luminarycloud/_proto/base/base_pb2.pyi +4 -0
- luminarycloud/_proto/client/simulation_pb2.py +358 -339
- luminarycloud/_proto/client/simulation_pb2.pyi +89 -3
- luminarycloud/_proto/physicsaiinferenceservice/physicsaiinferenceservice_pb2.py +35 -0
- luminarycloud/_proto/physicsaiinferenceservice/physicsaiinferenceservice_pb2.pyi +7 -0
- luminarycloud/_proto/physicsaitrainingservice/physicsaitrainingservice_pb2.py +6 -3
- luminarycloud/_proto/physicsaitrainingservice/physicsaitrainingservice_pb2_grpc.py +68 -0
- luminarycloud/_proto/physicsaitrainingservice/physicsaitrainingservice_pb2_grpc.pyi +24 -0
- luminarycloud/_wrapper.py +53 -7
- luminarycloud/enum/vis_enums.py +6 -0
- luminarycloud/feature_modification.py +25 -32
- luminarycloud/geometry.py +10 -6
- luminarycloud/geometry_version.py +4 -0
- luminarycloud/mesh.py +4 -0
- luminarycloud/meshing/mesh_generation_params.py +5 -6
- luminarycloud/meshing/sizing_strategy/sizing_strategies.py +1 -2
- luminarycloud/outputs/__init__.py +2 -0
- luminarycloud/outputs/output_definitions.py +3 -3
- luminarycloud/outputs/stopping_conditions.py +94 -0
- luminarycloud/params/enum/_enum_wrappers.py +16 -0
- luminarycloud/params/geometry/shapes.py +33 -33
- luminarycloud/params/simulation/adaptive_mesh_refinement/__init__.py +1 -0
- luminarycloud/params/simulation/adaptive_mesh_refinement/active_region_.py +83 -0
- luminarycloud/params/simulation/adaptive_mesh_refinement/boundary_layer_profile_.py +1 -1
- luminarycloud/params/simulation/adaptive_mesh_refinement_.py +8 -1
- luminarycloud/physics_ai/__init__.py +7 -0
- luminarycloud/physics_ai/inference.py +166 -199
- luminarycloud/physics_ai/models.py +22 -0
- luminarycloud/physics_ai/solution.py +4 -0
- luminarycloud/pipelines/api.py +143 -16
- luminarycloud/pipelines/core.py +1 -1
- luminarycloud/pipelines/stages.py +22 -9
- luminarycloud/project.py +61 -8
- luminarycloud/simulation.py +25 -0
- luminarycloud/types/__init__.py +2 -0
- luminarycloud/types/ids.py +2 -0
- luminarycloud/types/vector3.py +1 -2
- luminarycloud/vis/__init__.py +1 -0
- luminarycloud/vis/data_extraction.py +7 -7
- luminarycloud/vis/filters.py +97 -0
- luminarycloud/vis/interactive_report.py +163 -7
- luminarycloud/vis/report.py +113 -1
- luminarycloud/vis/visualization.py +3 -0
- luminarycloud/volume_selection.py +16 -8
- luminarycloud/workflow_utils.py +149 -0
- {luminarycloud-0.22.0.dist-info → luminarycloud-0.22.2.dist-info}/METADATA +1 -1
- {luminarycloud-0.22.0.dist-info → luminarycloud-0.22.2.dist-info}/RECORD +80 -76
- {luminarycloud-0.22.0.dist-info → luminarycloud-0.22.2.dist-info}/WHEEL +1 -1
- luminarycloud/_proto/api/v0/luminarycloud/inference/inference_pb2.py +0 -61
- luminarycloud/_proto/api/v0/luminarycloud/inference/inference_pb2.pyi +0 -85
- luminarycloud/_proto/api/v0/luminarycloud/inference/inference_pb2_grpc.py +0 -67
- luminarycloud/_proto/api/v0/luminarycloud/inference/inference_pb2_grpc.pyi +0 -26
- luminarycloud/_proto/inferenceservice/inferenceservice_pb2.py +0 -69
- luminarycloud/pipeline_util/dictable.py +0 -27
luminarycloud/_client/authentication_plugin.py
CHANGED

@@ -38,3 +38,52 @@ class AuthenticationPlugin(grpc.AuthMetadataPlugin):
             callback(metadata, None)
         except Exception as err:
             callback(None, err)
+
+
+class AuthInterceptor(
+    grpc.UnaryUnaryClientInterceptor,
+    grpc.UnaryStreamClientInterceptor,
+    grpc.StreamUnaryClientInterceptor,
+    grpc.StreamStreamClientInterceptor,
+):
+    """
+    I need this as a workaround for container-to-host connections because I need to create a channel
+    that uses CallCredentials but doesn't use any ChannelCredentials. I.e. I need to authenticate
+    the requests, but I need the connection to be unencrypted. This is because the grpc server on
+    the native host isn't using SSL, so I can't use grpc.ssl_channel_credentials(), but it's also
+    not reachable on a loopback interface, so I can't use grpc.local_channel_credentials() either.
+    So I need to use a grpc.insecure_channel(), but you can't use CallCredentials with an insecure
+    channel. So the workaround is to use an interceptor instead of CallCredentials.
+
+    Also, I don't care about auth0, so I'm only supporting an API key.
+    """
+
+    def __init__(self, api_key: str):
+        self._api_key = api_key
+
+    def _augment(self, metadata):
+        return metadata + [("x-api-key", self._api_key)]
+
+    def intercept_unary_unary(self, continuation, client_call_details, request):
+        new_details = client_call_details._replace(
+            metadata=self._augment(client_call_details.metadata or [])
+        )
+        return continuation(new_details, request)
+
+    def intercept_unary_stream(self, continuation, client_call_details, request):
+        new_details = client_call_details._replace(
+            metadata=self._augment(client_call_details.metadata or [])
+        )
+        return continuation(new_details, request)
+
+    def intercept_stream_unary(self, continuation, client_call_details, request_iter):
+        new_details = client_call_details._replace(
+            metadata=self._augment(client_call_details.metadata or [])
+        )
+        return continuation(new_details, request_iter)
+
+    def intercept_stream_stream(self, continuation, client_call_details, request_iter):
+        new_details = client_call_details._replace(
+            metadata=self._augment(client_call_details.metadata or [])
+        )
+        return continuation(new_details, request_iter)
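Note: for readers unfamiliar with gRPC interceptors, a minimal sketch of how AuthInterceptor is meant to be wired up, mirroring what client.py does below when insecure_grpc_channel is set (the host:port address is a placeholder):

    import grpc

    # Plaintext channel to a placeholder address; CallCredentials are not
    # allowed on an insecure channel, so the API key is attached per-RPC
    # as metadata by the interceptor instead.
    channel = grpc.insecure_channel("host.docker.internal:10000")
    channel = grpc.intercept_channel(channel, AuthInterceptor(api_key="my-api-key"))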
luminarycloud/_client/client.py
CHANGED

@@ -34,7 +34,9 @@ from .._proto.api.v0.luminarycloud.named_variable_set.named_variable_set_pb2_grp
 from .._proto.api.v0.luminarycloud.physics_ai.physics_ai_pb2_grpc import (
     PhysicsAiServiceStub,
 )
-from .._proto.api.v0.luminarycloud.
+from .._proto.api.v0.luminarycloud.physicsaiinference.physicsaiinference_pb2_grpc import (
+    PhysicsAiInferenceServiceStub,
+)
 from .._proto.api.v0.luminarycloud.thirdpartyintegration.onshape.onshape_pb2_grpc import (
     OnshapeServiceStub,
 )

@@ -47,7 +49,7 @@ from .._proto.api.v0.luminarycloud.vis.vis_pb2_grpc import VisAPIServiceStub
 from .._proto.api.v0.luminarycloud.feature_flag.feature_flag_pb2_grpc import (
     FeatureFlagServiceStub,
 )
-from .authentication_plugin import AuthenticationPlugin
+from .authentication_plugin import AuthenticationPlugin, AuthInterceptor
 from .config import LC_DOMAIN, LC_API_KEY
 from .logging_interceptor import LoggingInterceptor
 from .retry_interceptor import RetryInterceptor

@@ -71,8 +73,8 @@ class Client(
     StoppingConditionServiceStub,
     NamedVariableSetServiceStub,
     PhysicsAiServiceStub,
-    InferenceServiceStub,
     OnshapeServiceStub,
+    PhysicsAiInferenceServiceStub,
     ProjectUIStateServiceStub,
     FeatureFlagServiceStub,
 ):

@@ -93,11 +95,20 @@ class Client(
         The URL of the HTTP REST server. If not provided, it will default to the `target`.
     localhost : bool
         True if the API server is running locally.
+    insecure_grpc_channel : bool
+        True to use an unencrypted gRPC channel, even though requests are authenticated. There's no
+        legitimate reason to do this outside of a local development situation where the SDK is
+        running from a container and connecting to an API server that is running on the host.
     grpc_channel_options : Optional[Iterable[tuple[str, str]]]
         A list of gRPC channel args. The full list is available here:
         https://github.com/grpc/grpc/blob/v1.46.x/include/grpc/impl/codegen/grpc_types.h
     api_key : Optional[str]
         The API key to use for authentication.
+    log_retries : bool
+        True to log each retriable error response. There are some errors the API server may return
+        that are known to be transient, and the client will always retry requests when it gets one
+        of them. By default, the client retries silently. Set this to True to log the error
+        responses (at INFO level) for the retriable errors.
     **kwargs : dict, optional
         Additional arguments are passed to Auth0Client. See _auth/auth.py.

@@ -116,9 +127,11 @@ class Client(
         target: str = LC_DOMAIN,
         http_target: str | None = None,
         localhost: bool = False,
+        insecure_grpc_channel: bool = False,
         grpc_channel_options: Optional[Iterable[tuple[str, Union[str, int]]]] = None,
         channel_credentials: Optional[grpc.ChannelCredentials] = None,
         api_key: Optional[str] = LC_API_KEY,
+        log_retries: bool = False,
         **kwargs: Any,
     ):
         self._target = target

@@ -139,7 +152,12 @@ class Client(
         if grpc_channel_options:
             grpc_channel_options_with_keep_alive.extend(grpc_channel_options)
         self._channel = self._create_channel(
-            localhost,
+            localhost,
+            insecure_grpc_channel,
+            grpc_channel_options_with_keep_alive,
+            channel_credentials,
+            api_key,
+            log_retries,
         )
         self._context_tokens: list[Token] = []
         self.__register_rpcs()

@@ -193,9 +211,11 @@ class Client(
     def _create_channel(
         self,
         localhost: bool = False,
+        insecure: bool = False,
         grpc_channel_options: Optional[Iterable[tuple[str, Union[str, int]]]] = None,
         channel_credentials: Optional[grpc.ChannelCredentials] = None,
         api_key: Optional[str] = None,
+        log_retries: bool = False,
     ) -> grpc.Channel:
         if channel_credentials is None:
             if localhost:

@@ -217,15 +237,22 @@ class Client(
         call_creds = grpc.metadata_call_credentials(auth_plugin)
         composite_creds = grpc.composite_channel_credentials(channel_credentials, call_creds)
         options = grpc_channel_options and list(grpc_channel_options)
-        channel = grpc.secure_channel(
-            self._target,
-            composite_creds,
-            options=options,
-        )
+        if insecure:
+            channel = grpc.insecure_channel(self._target, options=options)
+            channel = grpc.intercept_channel(
+                channel,
+                AuthInterceptor(api_key),
+            )
+        else:
+            channel = grpc.secure_channel(
+                self._target,
+                composite_creds,
+                options=options,
+            )
         intercepted_channel = grpc.intercept_channel(
             channel,
             LoggingInterceptor(),
-            RetryInterceptor(),
+            RetryInterceptor(log_retries),
         )
         return add_instrumentation(
             intercepted_channel,

@@ -248,7 +275,7 @@ class Client(
         OutputDefinitionServiceStub.__init__(self, self._channel)
         StoppingConditionServiceStub.__init__(self, self._channel)
         PhysicsAiServiceStub.__init__(self, self._channel)
-        InferenceServiceStub.__init__(self, self._channel)
+        PhysicsAiInferenceServiceStub.__init__(self, self._channel)
         NamedVariableSetServiceStub.__init__(self, self._channel)
         OnshapeServiceStub.__init__(self, self._channel)
         ProjectUIStateServiceStub.__init__(self, self._channel)
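Note: taken together, the new flags surface to SDK users roughly as in this hedged sketch (the import path and target address are assumptions for illustration, not part of the diff):

    from luminarycloud._client.client import Client

    # Container-to-host development setup: authenticated but unencrypted gRPC,
    # with transient-error retries logged at INFO level.
    client = Client(
        target="host.docker.internal:10000",  # placeholder address
        insecure_grpc_channel=True,
        api_key="my-api-key",
        log_retries=True,
    )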
luminarycloud/_client/http_client.py
CHANGED

@@ -32,7 +32,7 @@ class HttpClient:
         api_key: str | None = None,
         auth0_client: Auth0Client | None = None,
         *,
-        timeout: int =
+        timeout: int | None = 300,
         retries: int = 3,
         backoff_factor: float = 0.3,
         retriable_status_codes: tuple = (500, 502, 503, 504, 429),
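Note: the new int | None annotation implies timeout=None is now accepted, presumably to disable the per-request HTTP timeout; that reading is an assumption from the type alone, since the body of HttpClient is not shown in this hunk. The previous default was truncated in this view, so only the new default of 300 seconds is certain.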
luminarycloud/_client/retry_interceptor.py
CHANGED

@@ -2,19 +2,68 @@
 from collections.abc import Callable
 from time import sleep
 from typing import Any
+import logging
 
 import grpc
 from grpc import (
     ClientCallDetails,
     UnaryUnaryClientInterceptor,
 )
+from grpc_status.rpc_status import GRPC_DETAILS_METADATA_KEY
 
 from luminarycloud.exceptions import AuthenticationError
+from luminarycloud._proto.base import base_pb2
+
+
+def _is_rate_limited(call: grpc.Call) -> bool:
+    """
+    Check if a gRPC call failed due to rate limiting.
+
+    Rate limit errors are identified with the SUBCODE_RATE_LIMITED subcode and UNAVAILABLE status.
+
+    Args:
+        call: The gRPC call to check
+
+    Returns:
+        True if the error is a rate limit error, False otherwise
+    """
+    if call.code() != grpc.StatusCode.UNAVAILABLE:
+        return False
+
+    try:
+        # Get the trailing metadata which contains error details
+        # Metadata is a sequence of tuples (key, value)
+        for key, value in call.trailing_metadata() or []:
+            if key != GRPC_DETAILS_METADATA_KEY or not isinstance(value, bytes):
+                continue
+
+            status = base_pb2.Status()
+            status.ParseFromString(value)
+            for any_detail in status.details:
+                if any_detail.Is(base_pb2.StatusPayload.DESCRIPTOR):
+                    payload = base_pb2.StatusPayload()
+                    any_detail.Unpack(payload)
+                    if payload.subcode == base_pb2.SUBCODE_RATE_LIMITED:
+                        return True
+    except Exception:
+        pass
+    return False
+
+
+logger = logging.getLogger(__name__)
 
 
 class RetryInterceptor(UnaryUnaryClientInterceptor):
+    def __init__(self, log_retries: bool = False):
+        self.log_retries = log_retries
+        super().__init__()
+
     """
-    A retry interceptor that retries on
+    A retry interceptor that retries on rate limit errors and other retryable errors.
+
+    This interceptor handles:
+    1. Rate limit errors (UNAVAILABLE with SUBCODE_RATE_LIMITED) - always retried
+    2. [grpc.StatusCode.RESOURCE_EXHAUSTED, grpc.StatusCode.UNAVAILABLE] - retried
 
     This is required because, while the retry policy for the gRPC client is configurable via
     https://github.com/grpc/grpc-proto/blob/master/grpc/service_config/service_config.proto,

@@ -43,7 +92,10 @@ class RetryInterceptor(UnaryUnaryClientInterceptor):
         n_max_retries = 20
         max_retry_seconds = 20
         backoffs = [min(i * 2, max_retry_seconds) for i in range(1, n_max_retries)]
-        for backoff in backoffs:
+        backoff_index = 0
+        while True:
+            if backoff_index >= len(backoffs):
+                break
             call = continuation(client_call_details, request)
             if call.code() not in retryable_codes:
                 break

@@ -54,7 +106,17 @@ class RetryInterceptor(UnaryUnaryClientInterceptor):
             details = call.details() or ""
             if "InteractiveAuthException" in details:
                 break
+            backoff = backoffs[backoff_index]
+            if self.log_retries:
+                logger.info(
+                    f"Retrying {client_call_details.method} in {backoff} seconds (last response: {call.code()}, {call.details()})"
+                )
             sleep(backoff)
+            # Keep retrying rate-limited calls while increasing the backoff up to the max.
+            backoff_index += 1
+            if _is_rate_limited(call):
+                backoff_index = max(min(backoff_index, len(backoffs) - 2), 0)
+
         try:
             call.result()
         except grpc.RpcError as e:
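Note: a standalone sketch of the backoff schedule the rewritten loop walks, reproducing the expressions above for illustration only:

    n_max_retries = 20
    max_retry_seconds = 20
    # Delays ramp 2, 4, 6, ... and saturate at 20 seconds.
    backoffs = [min(i * 2, max_retry_seconds) for i in range(1, n_max_retries)]
    print(backoffs[:5], backoffs[-1])  # [2, 4, 6, 8, 10] 20

    # The post-sleep clamp for rate-limited calls keeps backoff_index below
    # len(backoffs), so a persistently rate-limited call retries indefinitely
    # at (just under) the maximum backoff instead of exhausting its retry budget.
    backoff_index = len(backoffs) - 1
    backoff_index = max(min(backoff_index, len(backoffs) - 2), 0)
    print(backoffs[backoff_index])  # 20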
luminarycloud/_helpers/__init__.py
CHANGED

@@ -2,6 +2,12 @@
 from ._create_geometry import (
     create_geometry as create_geometry,
 )
+from ._inference_jobs import (
+    create_inference_job as create_inference_job,
+    get_inference_job as get_inference_job,
+    list_inference_jobs as list_inference_jobs,
+    SurfaceForInference as SurfaceForInference,
+)
 from ._create_simulation import (
     create_simulation as create_simulation,
 )

@@ -24,6 +30,9 @@ from ._simulation_params_from_json import (
 from ._timestamp_to_datetime import (
     timestamp_to_datetime as timestamp_to_datetime,
 )
+from ._parse_iso_datetime import (
+    parse_iso_datetime as parse_iso_datetime,
+)
 from .upload import (
     upload_file as upload_file,
 )
luminarycloud/_helpers/_inference_jobs.py
ADDED

@@ -0,0 +1,227 @@
+# Copyright 2025 Luminary Cloud, Inc. All Rights Reserved.
+from typing import Any, Dict, Optional
+from json import dumps as json_dumps
+import os
+import uuid
+from .._client import get_default_client
+from .._proto.api.v0.luminarycloud.physicsaiinference import (
+    physicsaiinference_pb2 as physicsaiinferencepb,
+)
+from .upload import upload_file, uploadpb
+from ..types import PhysicsAiModelVersionID, PhysicsAiInferenceJobID
+from ..physics_ai.inference import VisualizationExport, InferenceJob, SurfaceForInference
+
+
+# Helper function to upload an STL file if it is not already a GCS URL
+def _upload_if_file(project_id: str, fname: str) -> str:
+    if not fname.split(".")[-1].lower() == "stl":
+        raise RuntimeError("Unsupported file for inference")
+    if fname.startswith("gs://"):
+        return fname
+    if os.path.exists(fname) and os.path.isfile(fname):
+        params = uploadpb.ResourceParams()
+        client = get_default_client()
+        result = upload_file(client, project_id, params, fname)
+        return result[1].url
+    raise RuntimeError("Unsupported file for inference")
+
+
+def _build_inference_request(
+    project_id: str,
+    geometry: str,
+    model_version_id: PhysicsAiModelVersionID,
+    conditions: Optional[Dict[str, Any]] = None,
+    settings: Optional[Dict[str, Any]] = None,
+    surfaces: Optional[list[SurfaceForInference]] = None,
+    inference_fields: Optional[list[str]] = None,
+    per_surface_visualizations: Optional[list[VisualizationExport]] = None,
+    merged_visualizations: Optional[list[VisualizationExport]] = None,
+) -> physicsaiinferencepb.CreateInferenceServiceJobRequest:
+    """Helper function to build an inference service job request.
+
+    Parameters
+    ----------
+    project_id : str
+        Reference to a project.
+    geometry : str
+        Path to STL file or GCS URL (gs://) of the geometry to run inference on.
+        If a local file path is provided, it will be uploaded to the project.
+    model_version_id : PhysicsAiModelVersionID
+        The ID of the trained model version to use for inference.
+    conditions : Dict[str, Any], optional
+        Dictionary of conditions to be passed to the inference service (e.g., alpha, beta, etc.).
+    settings : Dict[str, Any], optional
+        Dictionary of settings to be passed to the inference service (e.g., stencil_size).
+    surfaces : list[SurfaceForInference], optional
+        List of surfaces for inference, each with 'name' and 'url' keys.
+    inference_fields : list[str], optional
+        Specific fields within the trained model to return inference results for.
+    per_surface_visualizations : list[VisualizationExport], optional
+        Types of visualization to write for each surface (e.g., LUMINARY, VTK).
+    merged_visualizations : list[VisualizationExport], optional
+        Types of merged visualization to write across all surfaces.
+
+    Returns
+    -------
+    CreateInferenceServiceJobRequest
+        The constructed protobuf request object.
+    """
+
+    geometry_url = _upload_if_file(project_id, geometry)
+
+    # Encode settings as JSON and store as bytes
+    settings_bytes = b""
+    if settings is not None:
+        settings_bytes = json_dumps(settings).encode("utf-8")
+
+    # Convert the conditions dict to bytes if provided
+    conditions_bytes = b""
+    if conditions is not None:
+        conditions_bytes = json_dumps(conditions).encode("utf-8")
+
+    # Generate a unique request_id for deduplication and to satisfy the database constraint
+    # The backend uses request_id as the Name field, which must be non-empty
+    request_id = str(uuid.uuid4())
+
+    # Build request with base parameters
+    req_params = {
+        "request_id": request_id,
+        "geometry": geometry_url,
+        "model_version_id": str(model_version_id),
+        "conditions": conditions_bytes,
+        "settings": settings_bytes,
+        "project_id": project_id,
+    }
+
+    # Add optional inference fields
+    if inference_fields is not None:
+        req_params["inference_fields"] = inference_fields
+
+    # Add optional per-surface visualizations
+    if per_surface_visualizations is not None:
+        req_params["per_surface_visualizations"] = per_surface_visualizations
+
+    # Add optional merged visualizations
+    if merged_visualizations is not None:
+        req_params["merged_visualizations"] = merged_visualizations
+
+    # Add optional surfaces
+    if surfaces is not None:
+        surfaces_proto: list[physicsaiinferencepb.SurfaceForInference] = []
+        for surface in surfaces:
+            surfaces_proto.append(
+                physicsaiinferencepb.SurfaceForInference(
+                    name=surface["name"], url=_upload_if_file(project_id, surface["url"])
+                )
+            )
+        req_params["surfaces"] = surfaces_proto
+
+    return physicsaiinferencepb.CreateInferenceServiceJobRequest(**req_params)
+
+
+def create_inference_job(
+    project_id: str,
+    geometry: str,
+    model_version_id: PhysicsAiModelVersionID,
+    synchronous: bool = False,
+    conditions: Optional[Dict[str, Any]] = None,
+    settings: Optional[Dict[str, Any]] = None,
+    surfaces: Optional[list[SurfaceForInference]] = None,
+    inference_fields: Optional[list[str]] = None,
+    per_surface_visualizations: Optional[list[VisualizationExport]] = None,
+    merged_visualizations: Optional[list[VisualizationExport]] = None,
+) -> InferenceJob:
+    """Creates an inference service job.
+
+    Parameters
+    ----------
+    project_id : str
+        Reference to a project.
+    geometry : str
+        Path to STL file or GCS URL (gs://) of the geometry to run inference on.
+        If a local file path is provided, it will be uploaded to the project.
+    model_version_id : PhysicsAiModelVersionID
+        The ID of the trained model version to use for inference.
+    synchronous : bool, optional
+        Whether to wait for the job to complete before returning the result.
+    conditions : Dict[str, Any], optional
+        Dictionary of conditions to be passed to the inference service (e.g., alpha, beta, etc.).
+    settings : Dict[str, Any], optional
+        Dictionary of settings to be passed to the inference service (e.g., stencil_size).
+    surfaces : list[SurfaceForInference], optional
+        List of surfaces for inference, each with 'name' and 'url' keys.
+    inference_fields : list[str], optional
+        Specific fields within the trained model to return inference results for.
+    per_surface_visualizations : list[VisualizationExport], optional
+        Types of visualization to write for each surface (e.g., LUMINARY, VTK).
+    merged_visualizations : list[VisualizationExport], optional
+        Types of merged visualization to write across all surfaces.
+
+    Returns
+    -------
+    InferenceJob
+        The created inference job. Its results map numeric outputs to float/vector
+        values, and surface/volume and visualization outputs to URLs of data files.
+
+    .. warning:: This feature is experimental and may change or be removed without notice.
+    """
+    req = _build_inference_request(
+        project_id,
+        geometry,
+        model_version_id,
+        conditions,
+        settings,
+        surfaces,
+        inference_fields,
+        per_surface_visualizations,
+        merged_visualizations,
+    )
+    if synchronous:
+        res: physicsaiinferencepb.GetInferenceServiceJobResponse = (
+            get_default_client().CreateInferenceServiceJob(req)
+        )
+    else:  # Asynchronous inference
+        res: physicsaiinferencepb.CreateInferenceServiceJobAsyncResponse = (
+            get_default_client().CreateInferenceServiceJobAsync(req)
+        )
+    return InferenceJob(res.job)
+
+
+def get_inference_job(job_id: str) -> InferenceJob:
+    """Retrieves an inference service job by its ID.
+
+    Parameters
+    ----------
+    job_id : str
+        The ID of the inference job to retrieve.
+
+    Returns
+    -------
+    InferenceJob
+        The inference job details including results and status.
+
+    .. warning:: This feature is experimental and may change or be removed without notice.
+    """
+
+    req = physicsaiinferencepb.GetInferenceServiceJobRequest(job_id=job_id)
+    res: physicsaiinferencepb.GetInferenceServiceJobResponse = (
+        get_default_client().GetInferenceServiceJob(req)
+    )
+    return InferenceJob(res.job)
+
+
+def list_inference_jobs(project_id: str) -> list[InferenceJob]:
+    """Lists all inference service jobs for a project.
+
+    Parameters
+    ----------
+    project_id : str
+        The project to list inference jobs for.
+    """
+    req = physicsaiinferencepb.ListInferenceServiceJobsRequest(project_id=project_id)
+    res: physicsaiinferencepb.ListInferenceServiceJobsResponse = (
+        get_default_client().ListInferenceServiceJobs(req)
+    )
+    return [InferenceJob(job) for job in res.jobs]
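Note: a hedged usage sketch for the new helpers (the IDs and file name are placeholders; the re-export through luminarycloud._helpers comes from the __init__.py hunk above):

    from luminarycloud._helpers import create_inference_job, get_inference_job

    # Kick off an asynchronous job from a local STL; the file is uploaded
    # to the project automatically by _upload_if_file.
    job = create_inference_job(
        project_id="proj-123",          # placeholder
        geometry="wing.stl",            # local path or gs:// URL
        model_version_id="mv-456",      # placeholder
        conditions={"alpha": 2.5},      # JSON-encoded into the request
    )

    # Later, poll the job by its ID (placeholder; assumes the job ID is
    # exposed by the InferenceJob wrapper defined in physics_ai/inference.py).
    job = get_inference_job("job-789")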
luminarycloud/_helpers/_parse_iso_datetime.py
ADDED

@@ -0,0 +1,54 @@
+# Copyright 2023-2025 Luminary Cloud, Inc. All Rights Reserved.
+from datetime import datetime
+
+
+def parse_iso_datetime(iso_str: str) -> datetime:
+    """
+    Parse an ISO format datetime string, handling the 'Z' timezone indicator.
+
+    This function acts as a compatibility shim for Python < 3.11, which doesn't
+    support 'Z' in fromisoformat. It normalizes 'Z' (or 'z') to '+00:00' before parsing,
+    making it compatible with all Python versions (3.7+).
+
+    Parameters
+    ----------
+    iso_str : str
+        ISO format datetime string, optionally ending with 'Z' or 'z' for UTC.
+
+    Returns
+    -------
+    datetime
+        Parsed datetime object.
+
+    Raises
+    ------
+    ValueError
+        If the string is not a valid ISO format datetime string.
+    TypeError
+        If iso_str is not a string.
+
+    Examples
+    --------
+    >>> parse_iso_datetime("2023-07-31T13:54:12Z")
+    datetime.datetime(2023, 7, 31, 13, 54, 12, tzinfo=datetime.timezone.utc)
+    >>> parse_iso_datetime("2023-07-31T13:54:12+00:00")
+    datetime.datetime(2023, 7, 31, 13, 54, 12, tzinfo=datetime.timezone.utc)
+    """
+    if not isinstance(iso_str, str):
+        raise TypeError(f"parse_iso_datetime expects a string, got {type(iso_str).__name__}")
+
+    if not iso_str:
+        raise ValueError("parse_iso_datetime: empty string is not a valid ISO format datetime")
+
+    # Normalize 'Z' or 'z' at the end to '+00:00'
+    # Strip whitespace first to handle cases like "2023-01-01T00:00:00Z "
+    iso_str = iso_str.strip()
+
+    if iso_str.endswith(("Z", "z")):
+        # Only normalize if the string is longer than just 'Z'/'z'
+        if len(iso_str) > 1:
+            iso_str = iso_str[:-1] + "+00:00"
+        else:
+            raise ValueError("parse_iso_datetime: 'Z' alone is not a valid ISO format datetime")
+
+    return datetime.fromisoformat(iso_str)
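Note: the gap this shim covers, in two lines (datetime.fromisoformat rejects a trailing 'Z' on Python 3.10 and earlier; 3.11+ accepts it natively):

    from luminarycloud._helpers import parse_iso_datetime

    # Works on all supported Python versions, unlike bare
    # datetime.fromisoformat("2023-07-31T13:54:12Z") on <= 3.10.
    dt = parse_iso_datetime("2023-07-31T13:54:12Z")
    print(dt.isoformat())  # 2023-07-31T13:54:12+00:00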
luminarycloud/_helpers/download.py
CHANGED

@@ -17,6 +17,7 @@ from .._proto.api.v0.luminarycloud.solution.solution_pb2 import (
 )
 from .._proto.api.v0.luminarycloud.physics_ai.physics_ai_pb2 import (
     GetSolutionDataPhysicsAIRequest,
+    SurfaceGroup,
 )
 from .._client import Client
 from ..enum.quantity_type import QuantityType

@@ -140,6 +141,7 @@ def download_solution_physics_ai(
     process_volume: bool = False,
     single_precision: bool = False,
     internal_options: Optional[Dict[str, str]] = None,
+    export_surface_groups: Optional[Dict[str, List[str]]] = None,
 ) -> Optional[FileChunkStream]:
     """
     Returns the download as a file-like object, or None if destination_url is provided.

@@ -165,6 +167,9 @@
         Whether to process volume meshes during physics AI processing.
     single_precision: bool
         If True, the solution will be downloaded in single precision.
+    export_surface_groups: Optional[Dict[str, List[str]]]
+        Dictionary mapping group names to lists of surface names.
+        Each group will be exported as an individual STL file.
 
     Examples
     --------

@@ -173,6 +178,11 @@
     ...     fp.write(dl.read())
     """
 
+    surface_groups_pb = []
+    if export_surface_groups:
+        for group_name, surfaces in export_surface_groups.items():
+            surface_groups_pb.append(SurfaceGroup(name=group_name, surfaces=surfaces))
+
     request = GetSolutionDataPhysicsAIRequest(
         solution_id=solution_id,
         exclude_surfaces=exclude_surfaces or [],

@@ -186,6 +196,7 @@
         process_volume=process_volume,
         single_precision=single_precision,
         internal_options=internal_options or {},
+        export_surface_groups=surface_groups_pb,
     )
     response = client.GetSolutionDataPhysicsAI(request)
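Note: a hedged sketch of the new export_surface_groups option (the client construction, solution ID, surface names, and output file name are placeholders or assumptions based on the signature above):

    from luminarycloud._client import get_default_client
    from luminarycloud._helpers.download import download_solution_physics_ai

    # Each named group below is exported as its own STL file.
    dl = download_solution_physics_ai(
        get_default_client(),           # assumes the client is the first argument
        solution_id="sol-123",          # placeholder
        export_surface_groups={
            "wing": ["wing_upper", "wing_lower"],
            "tail": ["tail_left", "tail_right"],
        },
    )
    if dl is not None:
        with open("solution_download.dat", "wb") as fp:  # output name assumed
            fp.write(dl.read())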