fxn 0.0.53.tar.gz → 0.0.55.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fxn-0.0.53 → fxn-0.0.55}/PKG-INFO +1 -1
- {fxn-0.0.53 → fxn-0.0.55}/fxn/beta/__init__.py +5 -4
- {fxn-0.0.53 → fxn-0.0.55}/fxn/beta/cli/__init__.py +2 -1
- fxn-0.0.55/fxn/beta/cli/mcp.py +16 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/beta/metadata.py +106 -18
- fxn-0.0.55/fxn/cli/__init__.py +99 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/cli/auth.py +5 -5
- {fxn-0.0.53 → fxn-0.0.55}/fxn/cli/compile.py +50 -18
- {fxn-0.0.53 → fxn-0.0.55}/fxn/cli/misc.py +4 -4
- {fxn-0.0.53 → fxn-0.0.55}/fxn/cli/predictions.py +4 -13
- {fxn-0.0.53 → fxn-0.0.55}/fxn/cli/predictors.py +3 -3
- {fxn-0.0.53 → fxn-0.0.55}/fxn/cli/sources.py +3 -3
- {fxn-0.0.53 → fxn-0.0.55}/fxn/compile.py +7 -6
- fxn-0.0.55/fxn/lib/linux/arm64/libFunction.so +0 -0
- fxn-0.0.55/fxn/lib/linux/x86_64/libFunction.so +0 -0
- fxn-0.0.55/fxn/lib/macos/arm64/Function.dylib +0 -0
- fxn-0.0.55/fxn/lib/macos/x86_64/Function.dylib +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/lib/windows/arm64/Function.dll +0 -0
- fxn-0.0.55/fxn/lib/windows/x86_64/Function.dll +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/sandbox.py +15 -2
- {fxn-0.0.53 → fxn-0.0.55}/fxn/services/prediction.py +16 -1
- {fxn-0.0.53 → fxn-0.0.55}/fxn/types/__init__.py +1 -1
- {fxn-0.0.53 → fxn-0.0.55}/fxn/types/predictor.py +5 -18
- {fxn-0.0.53 → fxn-0.0.55}/fxn/version.py +1 -1
- {fxn-0.0.53 → fxn-0.0.55}/fxn.egg-info/PKG-INFO +1 -1
- {fxn-0.0.53 → fxn-0.0.55}/fxn.egg-info/SOURCES.txt +1 -0
- fxn-0.0.53/fxn/cli/__init__.py +0 -53
- fxn-0.0.53/fxn/lib/linux/arm64/libFunction.so +0 -0
- fxn-0.0.53/fxn/lib/linux/x86_64/libFunction.so +0 -0
- fxn-0.0.53/fxn/lib/macos/arm64/Function.dylib +0 -0
- fxn-0.0.53/fxn/lib/macos/x86_64/Function.dylib +0 -0
- fxn-0.0.53/fxn/lib/windows/x86_64/Function.dll +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/LICENSE +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/README.md +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/__init__.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/beta/cli/llm.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/beta/client.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/beta/llm/__init__.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/beta/llm/server.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/beta/services/__init__.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/beta/services/prediction.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/beta/services/remote.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/c/__init__.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/c/configuration.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/c/fxnc.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/c/map.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/c/prediction.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/c/predictor.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/c/stream.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/c/value.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/client.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/function.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/lib/__init__.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/logging.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/services/__init__.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/services/predictor.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/services/user.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/types/dtype.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/types/prediction.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn/types/user.py +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn.egg-info/dependency_links.txt +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn.egg-info/entry_points.txt +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn.egg-info/requires.txt +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/fxn.egg-info/top_level.txt +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/pyproject.toml +0 -0
- {fxn-0.0.53 → fxn-0.0.55}/setup.cfg +0 -0
{fxn-0.0.53 → fxn-0.0.55}/fxn/beta/__init__.py
@@ -6,8 +6,9 @@
 from .metadata import (
     CoreMLInferenceMetadata, LiteRTInferenceMetadata, LlamaCppInferenceMetadata,
     OnnxInferenceMetadata, OnnxRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
-    QnnInferenceMetadata, QnnInferenceBackend, QnnInferenceQuantization,
-    # Deprecated
-    ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata
+    QnnInferenceMetadata, QnnInferenceBackend, QnnInferenceQuantization, TensorRTInferenceMetadata
 )
-from .services import RemoteAcceleration
+from .services import RemoteAcceleration
+
+# DEPRECATED
+from .metadata import ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata
fxn-0.0.55/fxn/beta/cli/mcp.py
@@ -0,0 +1,16 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+
+from pathlib import Path
+from typer import Argument, Option, Typer
+from typing_extensions import Annotated
+
+app = Typer(no_args_is_help=True)
+
+@app.command(name="serve", help="Start an MCP server.")
+def serve (
+    port: Annotated[int, Option(help="Port to start the server on.")] = 11436
+):
+    pass
{fxn-0.0.53 → fxn-0.0.55}/fxn/beta/metadata.py
@@ -3,28 +3,46 @@
 # Copyright © 2025 NatML Inc. All Rights Reserved.
 #
 
-from os import PathLike
 from pathlib import Path
 from pydantic import BaseModel, BeforeValidator, ConfigDict, Field
 from typing import Annotated, Literal
 
 def _validate_torch_module (module: "torch.nn.Module") -> "torch.nn.Module": # type: ignore
     try:
-        from torch.nn import Module
+        from torch.nn import Module
         if not isinstance(module, Module):
-            raise ValueError(f"Expected torch.nn.Module
+            raise ValueError(f"Expected `torch.nn.Module` model but got `{type(module).__qualname__}`")
         return module
     except ImportError:
-        raise ImportError("PyTorch is required to create this metadata but is not installed.")
+        raise ImportError("PyTorch is required to create this metadata but it is not installed.")
 
 def _validate_ort_inference_session (session: "onnxruntime.InferenceSession") -> "onnxruntime.InferenceSession": # type: ignore
     try:
-        from onnxruntime import InferenceSession
+        from onnxruntime import InferenceSession
         if not isinstance(session, InferenceSession):
-            raise ValueError(f"Expected onnxruntime.InferenceSession
+            raise ValueError(f"Expected `onnxruntime.InferenceSession` model but got `{type(session).__qualname__}`")
         return session
     except ImportError:
-        raise ImportError("ONNXRuntime is required to create this metadata but is not installed.")
+        raise ImportError("ONNXRuntime is required to create this metadata but it is not installed.")
+
+def _validate_torch_tensor_args (args: list) -> list:
+    try:
+        from torch import Tensor
+        for idx, arg in enumerate(args):
+            if not isinstance(arg, Tensor):
+                raise ValueError(f"Expected `torch.Tensor` instance at `model_args[{idx}]` but got `{type(arg).__qualname__}`")
+        return args
+    except ImportError:
+        raise ImportError("PyTorch is required to create this metadata but it is not installed.")
+
+def _validate_llama_cpp_model (model: "llama_cpp.llama.Llama") -> "llama_cpp.llama.Llama": # type: ignore
+    try:
+        from llama_cpp import Llama
+        if not isinstance(model, Llama):
+            raise ValueError(f"Expected `llama_cpp.llama.Llama` model but got `{type(model).__qualname__}`")
+        return model
+    except ImportError:
+        raise ImportError("Llama-cpp-python is required to create this metadata but it is not installed.")
 
 class CoreMLInferenceMetadata (BaseModel):
     """
@@ -39,10 +57,15 @@ class CoreMLInferenceMetadata (BaseModel):
         description="PyTorch module to apply metadata to.",
         exclude=True
     )
-    model_args: list[object] = Field(
+    model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
         description="Positional inputs to the model.",
         exclude=True
     )
+    output_keys: list[str] | None = Field(
+        default=None,
+        description="Model output dictionary keys. Use this if the model returns a dictionary.",
+        exclude=True
+    )
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
 
 class OnnxInferenceMetadata (BaseModel):
@@ -58,10 +81,15 @@ class OnnxInferenceMetadata (BaseModel):
         description="PyTorch module to apply metadata to.",
         exclude=True
     )
-    model_args: list[object] = Field(
+    model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
         description="Positional inputs to the model.",
         exclude=True
     )
+    output_keys: list[str] | None = Field(
+        default=None,
+        description="Model output dictionary keys. Use this if the model returns a dictionary.",
+        exclude=True
+    )
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
 
 class OnnxRuntimeInferenceSessionMetadata (BaseModel):
@@ -96,10 +124,15 @@ class LiteRTInferenceMetadata (BaseModel):
         description="PyTorch module to apply metadata to.",
         exclude=True
     )
-    model_args: list[object] = Field(
+    model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
         description="Positional inputs to the model.",
         exclude=True
     )
+    output_keys: list[str] | None = Field(
+        default=None,
+        description="Model output dictionary keys. Use this if the model returns a dictionary.",
+        exclude=True
+    )
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
 
 class OpenVINOInferenceMetadata (BaseModel):
@@ -115,13 +148,18 @@ class OpenVINOInferenceMetadata (BaseModel):
         description="PyTorch module to apply metadata to.",
         exclude=True
     )
-    model_args: list[object] = Field(
+    model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
         description="Positional inputs to the model.",
         exclude=True
     )
+    output_keys: list[str] | None = Field(
+        default=None,
+        description="Model output dictionary keys. Use this if the model returns a dictionary.",
+        exclude=True
+    )
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
 
-QnnInferenceBackend = Literal["cpu", "gpu"
+QnnInferenceBackend = Literal["cpu", "gpu", "htp"]
 QnnInferenceQuantization = Literal["w8a8", "w8a16", "w4a8", "w4a16"]
 
 class QnnInferenceMetadata (BaseModel):
@@ -139,10 +177,15 @@ class QnnInferenceMetadata (BaseModel):
         description="PyTorch module to apply metadata to.",
         exclude=True
     )
-    model_args: list[object] = Field(
+    model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
         description="Positional inputs to the model.",
         exclude=True
     )
+    output_keys: list[str] | None = Field(
+        default=None,
+        description="Model output dictionary keys. Use this if the model returns a dictionary.",
+        exclude=True
+    )
     backend: QnnInferenceBackend = Field(
         default="cpu",
         description="QNN backend to execute the model.",
@@ -155,13 +198,58 @@ class QnnInferenceMetadata (BaseModel):
     )
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
 
-
+CudaArchitecture = Literal[
+    "sm_80", "sm_86", "sm_87", # Ampere (A100)
+    "sm_89", # Ada Lovelace (L40)
+    "sm_90", # Hopper (H100)
+    "sm_100", # Blackwell (B200)
+]
+
+TensorRTPrecision = Literal["fp32", "fp16", "int8", "int4"]
+
+class TensorRTInferenceMetadata (BaseModel):
+    """
+    Metadata required to lower a PyTorch model for inference on Nvidia GPUs with TensorRT.
+
+    Members:
+        model (torch.nn.Module): PyTorch module to apply metadata to.
+        model_args (tuple[Tensor,...]): Positional inputs to the model.
+        cuda_arch (CudaArchitecture): Target CUDA architecture for the TensorRT engine. Defaults to `sm_80` (Ampere).
+        precision (TensorRTPrecision): TensorRT engine inference precision. Defaults to `fp16`.
+    """
+    kind: Literal["meta.inference.tensorrt"] = "meta.inference.tensorrt"
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+        description="PyTorch module to apply metadata to.",
+        exclude=True
+    )
+    model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
+        description="Positional inputs to the model.",
+        exclude=True
+    )
+    output_keys: list[str] | None = Field(
+        default=None,
+        description="Model output dictionary keys. Use this if the model returns a dictionary.",
+        exclude=True
+    )
+    cuda_arch: CudaArchitecture = Field(
+        default="sm_80",
+        description="Target CUDA architecture for the TensorRT engine.",
+        exclude=True
+    )
+    precision: TensorRTPrecision = Field(
+        default="fp16",
+        description="TensorRT engine inference precision.",
+        exclude=True
+    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
+
+class LlamaCppInferenceMetadata (BaseModel):
     """
-    Metadata required to lower a
+    Metadata required to lower a Llama.cpp model for LLM inference.
     """
-    kind: Literal["meta.inference.
-
-    description="
+    kind: Literal["meta.inference.llama_cpp"] = "meta.inference.llama_cpp"
+    model: Annotated[object, BeforeValidator(_validate_llama_cpp_model)] = Field(
+        description="Llama model that metadata applies to.",
         exclude=True
     )
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
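The hunks above introduce `TensorRTInferenceMetadata` (with the `CudaArchitecture` and `TensorRTPrecision` literals) and add an `output_keys` field plus a `torch.Tensor` validator to the existing metadata classes. The following is a minimal usage sketch based only on the fields shown in this diff; the model class and its input shape are hypothetical placeholders, not code from the package:

```python
# Hypothetical sketch based on the fields added above; the model and inputs are placeholders.
import torch
from fxn.beta import TensorRTInferenceMetadata

class Classifier (torch.nn.Module):
    def forward (self, x):
        # Returns a dictionary, which is why `output_keys` is declared below.
        return { "logits": x.mean(dim=(2, 3)) }

metadata = TensorRTInferenceMetadata(
    model=Classifier(),                         # checked by `_validate_torch_module`
    model_args=[torch.randn(1, 3, 224, 224)],   # each arg must be a `torch.Tensor` (`_validate_torch_tensor_args`)
    output_keys=["logits"],                     # dictionary keys returned by the model
    cuda_arch="sm_90",                          # Hopper (H100); defaults to `sm_80`
    precision="fp16"                            # defaults to `fp16`
)
```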
fxn-0.0.55/fxn/cli/__init__.py
@@ -0,0 +1,99 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+
+import typer
+
+from ..logging import TracebackMarkupConsole
+from ..version import __version__
+
+from .auth import app as auth_app
+from .compile import compile_predictor, triage_predictor
+from .misc import cli_options
+from .predictions import create_prediction
+from .predictors import archive_predictor, delete_predictor, retrieve_predictor
+from .sources import retrieve_source
+from ..beta.cli import llm_app, mcp_app
+
+# Define CLI
+typer.main.console_stderr = TracebackMarkupConsole()
+app = typer.Typer(
+    name=f"Function CLI {__version__}",
+    no_args_is_help=True,
+    pretty_exceptions_show_locals=False,
+    pretty_exceptions_short=True,
+    add_completion=False
+)
+
+# Add top level options
+app.callback()(cli_options)
+
+# Predictions
+app.command(
+    name="predict",
+    help="Make a prediction.",
+    context_settings={ "allow_extra_args": True, "ignore_unknown_options": True },
+    rich_help_panel="Predictions"
+)(create_prediction)
+app.command(
+    name="source",
+    help="Retrieve the generated C++ code for a given prediction.",
+    rich_help_panel="Predictions"
+)(retrieve_source)
+
+# Predictors
+app.command(
+    name="compile",
+    help="Create a predictor by compiling a Python function.",
+    rich_help_panel="Predictors"
+)(compile_predictor)
+app.command(
+    name="retrieve",
+    help="Retrieve a predictor.",
+    rich_help_panel="Predictors"
+)(retrieve_predictor)
+app.command(
+    name="archive",
+    help="Archive a predictor.",
+    rich_help_panel="Predictors"
+)(archive_predictor)
+app.command(
+    name="delete",
+    help="Delete a predictor.",
+    rich_help_panel="Predictors"
+)(delete_predictor)
+
+# Subcommands
+app.add_typer(
+    auth_app,
+    name="auth",
+    help="Login, logout, and check your authentication status.",
+    rich_help_panel="Auth"
+)
+app.add_typer(
+    llm_app,
+    name="llm",
+    hidden=True,
+    help="Work with large language models (LLMs).",
+    rich_help_panel="Beta"
+)
+app.add_typer(
+    mcp_app,
+    name="mcp",
+    hidden=True,
+    help="Provide prediction functions as tools for use by AI assistants.",
+    rich_help_panel="Beta"
+)
+
+# Insiders
+app.command(
+    name="triage",
+    help="Triage a compile error.",
+    rich_help_panel="Insiders",
+    hidden=True
+)(triage_predictor)
+
+# Run
+if __name__ == "__main__":
+    app()
{fxn-0.0.53 → fxn-0.0.55}/fxn/cli/auth.py
@@ -12,7 +12,7 @@ from ..function import Function
 app = Typer(no_args_is_help=True)
 
 @app.command(name="login", help="Login to Function.")
-def login
+def login(
     access_key: str=Argument(..., help="Function access key.", envvar="FXN_ACCESS_KEY")
 ):
     fxn = Function(access_key=access_key)
@@ -22,18 +22,18 @@ def login (
     print_json(data=user)
 
 @app.command(name="status", help="Get current authentication status.")
-def auth_status
+def auth_status():
     fxn = Function(get_access_key())
     user = fxn.users.retrieve()
     user = user.model_dump() if user else None
     print_json(data=user)
 
 @app.command(name="logout", help="Logout from Function.")
-def logout
+def logout():
     _set_access_key(None)
     print("Successfully logged out of Function")
 
-def get_access_key
+def get_access_key() -> str:
     """
     Get the CLI access key.
 
@@ -46,7 +46,7 @@ def get_access_key () -> str:
     with open(credentials_path) as f:
         return f.read()
 
-def _set_access_key
+def _set_access_key(key: str):
     """
     Set the CLI access key.
 
{fxn-0.0.53 → fxn-0.0.55}/fxn/cli/compile.py
@@ -9,9 +9,11 @@ from inspect import getmembers, getmodulename, isfunction
 from pathlib import Path
 from pydantic import BaseModel
 from rich import print as print_rich
+from rich.panel import Panel
 import sys
 from typer import Argument, Option
 from typing import Callable, Literal
+from typing_extensions import Annotated
 from urllib.parse import urlparse, urlunparse
 
 from ..client import FunctionAPIError
@@ -21,16 +23,39 @@ from ..sandbox import EntrypointCommand
 from ..logging import CustomProgress, CustomProgressTask
 from .auth import get_access_key
 
-
-    pass
-
-def compile_predictor (
+def compile_predictor(
     path: str=Argument(..., help="Predictor path."),
     overwrite: bool=Option(False, "--overwrite", help="Whether to delete any existing predictor with the same tag before compiling."),
 ):
     run_async(_compile_predictor_async(path, overwrite=overwrite))
 
-
+def triage_predictor(
+    reference_code: Annotated[str, Argument(help="Predictor compilation reference code.")]
+):
+    fxn = Function(get_access_key())
+    error = fxn.client.request(
+        method="GET",
+        path=f"/predictors/triage?referenceCode={reference_code}",
+        response_type=_TriagedCompileError
+    )
+    user_panel = Panel(
+        error.user,
+        title="User Error",
+        title_align="left",
+        highlight=True,
+        border_style="bright_red"
+    )
+    internal_panel = Panel(
+        error.internal,
+        title="Internal Error",
+        title_align="left",
+        highlight=True,
+        border_style="gold1"
+    )
+    print_rich(user_panel)
+    print_rich(internal_panel)
+
+async def _compile_predictor_async(
     path: str,
     *,
     overwrite: bool
@@ -78,11 +103,11 @@ async def _compile_predictor_async (
             task_queue.push_log(event)
         elif isinstance(event, _ErrorEvent):
             task_queue.push_error(event)
-            raise
+            raise _CompileError(event.data.error)
     predictor_url = _compute_predictor_url(fxn.client.api_url, spec.tag)
     print_rich(f"\n[bold spring_green3]🎉 Predictor is now being compiled.[/bold spring_green3] Check it out at [link={predictor_url}]{predictor_url}[/link]")
 
-def _load_predictor_func
+def _load_predictor_func(path: str) -> Callable[...,object]:
     if "" not in sys.path:
         sys.path.insert(0, "")
     path: Path = Path(path).resolve()
@@ -97,7 +122,7 @@ def _load_predictor_func (path: str) -> Callable[...,object]:
     main_func = next(func for _, func in getmembers(module, isfunction) if hasattr(func, "__predictor_spec"))
     return main_func
 
-def _compute_predictor_url
+def _compute_predictor_url(api_url: str, tag: str) -> str:
     parsed_url = urlparse(api_url)
     hostname_parts = parsed_url.hostname.split(".")
     if hostname_parts[0] == "api":
@@ -107,32 +132,39 @@ def _compute_predictor_url (api_url: str, tag: str) -> str:
     predictor_url = urlunparse(parsed_url._replace(netloc=netloc, path=f"{tag}"))
     return predictor_url
 
-class _Predictor
+class _Predictor(BaseModel):
     tag: str
 
-class _LogData
+class _LogData(BaseModel):
     message: str
     level: int = 0
     status: Literal["success", "error"] = "success"
     update: bool = False
 
-class _LogEvent
+class _LogEvent(BaseModel):
     event: Literal["log"]
     data: _LogData
 
-class _ErrorData
+class _ErrorData(BaseModel):
     error: str
 
-class _ErrorEvent
+class _ErrorEvent(BaseModel):
     event: Literal["error"]
     data: _ErrorData
 
+class _CompileError(Exception):
+    pass
+
+class _TriagedCompileError(BaseModel):
+    user: str
+    internal: str
+
 class ProgressLogQueue:
 
-    def __init__
+    def __init__(self):
         self.queue: list[tuple[int, CustomProgressTask]] = []
 
-    def push_log
+    def push_log(self, event: _LogEvent):
         # Check for update
         if event.data.update and self.queue:
             current_level, current_task = self.queue[-1]
@@ -149,15 +181,15 @@ class ProgressLogQueue:
         task.__enter__()
         self.queue.append((event.data.level, task))
 
-    def push_error
+    def push_error(self, error: _ErrorEvent):
         while self.queue:
             _, current_task = self.queue.pop()
             current_task.__exit__(RuntimeError, None, None)
 
-    def __enter__
+    def __enter__(self):
         return self
 
-    def __exit__
+    def __exit__(self, exc_type, exc_value, traceback):
         while self.queue:
             _, current_task = self.queue.pop()
             current_task.__exit__(None, None, None)
{fxn-0.0.53 → fxn-0.0.55}/fxn/cli/misc.py
@@ -9,22 +9,22 @@ from webbrowser import open as open_browser
 
 from ..version import __version__
 
-def _explore
+def _explore(value: bool):
     if value:
         open_browser("https://fxn.ai/explore")
         raise Exit()
 
-def _learn
+def _learn(value: bool):
     if value:
         open_browser("https://docs.fxn.ai")
         raise Exit()
 
-def _version
+def _version(value: bool):
     if value:
         print(__version__)
         raise Exit()
 
-def cli_options
+def cli_options(
     explore: bool = Option(None, "--explore", callback=_explore, help="Explore predictors on Function."),
     learn: bool = Option(None, "--learn", callback=_learn, help="Learn about Function."),
     version: bool = Option(None, "--version", callback=_version, help="Get the Function CLI version.")
{fxn-0.0.53 → fxn-0.0.55}/fxn/cli/predictions.py
@@ -24,7 +24,7 @@ def create_prediction (
 ):
     run_async(_predict_async(tag, quiet=quiet, context=context))
 
-async def _predict_async
+async def _predict_async(tag: str, quiet: bool, context: Context):
     # Preload
     with CustomProgress(transient=True, disable=quiet):
         fxn = Function(get_access_key())
@@ -42,16 +42,7 @@ async def _predict_async (tag: str, quiet: bool, context: Context):
         prediction = fxn.predictions.create(tag, inputs=inputs)
     _log_prediction(prediction)
 
-def _parse_value (value: str):
-    """
-    Parse a value from a CLI argument.
-
-    Parameters:
-        value (str): CLI input argument.
-
-    Returns:
-        bool | int | float | str | Path: Parsed value.
-    """
+def _parse_value (value: str) -> float | int | bool | str | Image.Image | BytesIO:
     # Boolean
     if value == "true":
         return True
@@ -81,14 +72,14 @@ def _parse_value (value: str):
     # String
     return value
 
-def _log_prediction
+def _log_prediction(prediction: Prediction):
     images = [value for value in prediction.results or [] if isinstance(value, Image.Image)]
     prediction.results = [_serialize_value(value) for value in prediction.results] if prediction.results is not None else None
     print_json(data=prediction.model_dump())
     for image in images:
         image.show()
 
-def _serialize_value
+def _serialize_value(value) -> str:
     if isinstance(value, ndarray):
         return array_repr(value)
     if isinstance(value, Image.Image):
{fxn-0.0.53 → fxn-0.0.55}/fxn/cli/predictors.py
@@ -10,7 +10,7 @@ from ..function import Function
 from ..logging import CustomProgress, CustomProgressTask
 from .auth import get_access_key
 
-def retrieve_predictor
+def retrieve_predictor(
     tag: str=Argument(..., help="Predictor tag.")
 ):
     with CustomProgress(transient=True):
@@ -20,7 +20,7 @@ def retrieve_predictor (
     predictor = predictor.model_dump() if predictor else None
     print_json(data=predictor)
 
-def archive_predictor
+def archive_predictor(
     tag: str=Argument(..., help="Predictor tag.")
 ):
     with CustomProgress():
@@ -34,7 +34,7 @@ def archive_predictor (
             path=f"/predictors/{tag}/archive"
         )
 
-def delete_predictor
+def delete_predictor(
     tag: str=Argument(..., help="Predictor tag.")
 ):
     with CustomProgress():
{fxn-0.0.53 → fxn-0.0.55}/fxn/cli/sources.py
@@ -14,9 +14,9 @@ from ..function import Function
 from ..logging import CustomProgress, CustomProgressTask
 from .auth import get_access_key
 
-def retrieve_source
-    predictor: Annotated[str, Option(help="Predictor tag.")] = None,
+def retrieve_source(
     prediction: Annotated[str, Option(help="Prediction identifier. If specified, this MUST be from a prediction returned by the Function API.")] = None,
+    predictor: Annotated[str, Option(help="Predictor tag. If specified, a prediction will be made with this predictor before retrieving the source.")] = None,
     output: Annotated[Path, Option(help="Path to output source file.")] = Path("predictor.cpp")
 ):
     if not ((predictor is not None) ^ (prediction is not None)):
@@ -37,7 +37,7 @@ def retrieve_source (
     source.code = str(output.resolve())
     print_json(data=source.model_dump(mode="json", by_alias=True))
 
-class _PredictionSource
+class _PredictionSource(BaseModel):
     tag: str
     target: str
     code: str
{fxn-0.0.53 → fxn-0.0.55}/fxn/compile.py
@@ -14,10 +14,10 @@ from typing import Any, Callable, Literal, ParamSpec, TypeVar, cast
 from .beta import (
     CoreMLInferenceMetadata, LiteRTInferenceMetadata, LlamaCppInferenceMetadata,
     OnnxInferenceMetadata, OnnxRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
-    QnnInferenceMetadata
+    QnnInferenceMetadata, TensorRTInferenceMetadata
 )
 from .sandbox import Sandbox
-from .types import
+from .types import PredictorAccess
 
 CompileTarget = Literal[
     "android",
@@ -36,7 +36,8 @@ CompileMetadata = (
     OnnxInferenceMetadata |
     OnnxRuntimeInferenceSessionMetadata |
     OpenVINOInferenceMetadata |
-    QnnInferenceMetadata
+    QnnInferenceMetadata |
+    TensorRTInferenceMetadata
 )
 
 P = ParamSpec("P")
@@ -51,7 +52,7 @@ class PredictorSpec (BaseModel):
     sandbox: Sandbox = Field(description="Sandbox to compile the function.")
     targets: list[str] | None = Field(description="Targets to compile this predictor for. Pass `None` to compile for our default targets.")
     metadata: list[object] = Field(default=[], description="Metadata to use while compiling the function.")
-    access:
+    access: PredictorAccess = Field(description="Predictor access.")
     card: str | None = Field(default=None, description="Predictor card (markdown).")
     media: str | None = Field(default=None, description="Predictor media URL.")
     license: str | None = Field(default=None, description="Predictor license URL. This is required for public predictors.")
@@ -65,7 +66,7 @@ def compile (
     trace_modules: list[ModuleType]=[],
     targets: list[CompileTarget]=None,
     metadata: list[CompileMetadata]=[],
-    access:
+    access: PredictorAccess="private",
     card: str | Path=None,
     media: Path=None,
     license: str=None,
@@ -81,7 +82,7 @@ def compile (
         trace_modules (list): Modules to trace and compile.
         targets (list): Targets to compile this predictor for. Pass `None` to compile for our default targets.
         metadata (list): Metadata to use while compiling the function.
-        access (
+        access (PredictorAccess): Predictor access.
         card (str | Path): Predictor card markdown string or path to card.
         media (Path): Predictor thumbnail image (jpeg or png) path.
         license (str): Predictor license URL. This is required for public predictors.
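With `PredictorAccess` now a plain literal type, the `compile` decorator accepts `access` values of "public", "private", or "unlisted". A hedged sketch of a decorated function follows; the `tag`, `description`, and `sandbox` arguments are assumed from the rest of the package and are not shown in this hunk, and the tag itself is a placeholder:

```python
# Sketch of the updated decorator; tag, description, and sandbox are assumptions, not part of this hunk.
from fxn.compile import compile
from fxn.sandbox import Sandbox

@compile(
    tag="@acme/classifier",                     # placeholder predictor tag
    description="Classify an image.",
    sandbox=Sandbox().pip_install("torch"),
    access="private"                            # PredictorAccess: "public" | "private" | "unlisted"
)
def predict (image):
    ...
```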
Binary files changed; no textual diff shown.
{fxn-0.0.53 → fxn-0.0.55}/fxn/sandbox.py
@@ -57,6 +57,8 @@ class EntrypointCommand (UploadableCommand):
 class PipInstallCommand (BaseModel):
     kind: Literal["pip_install"] = "pip_install"
     packages: list[str]
+    index_url: str | None
+    flags: str
 
 class AptInstallCommand (BaseModel):
     kind: Literal["apt_install"] = "apt_install"
@@ -133,14 +135,25 @@ class Sandbox (BaseModel):
         )
         return Sandbox(commands=self.commands + [command])
 
-    def pip_install (
+    def pip_install (
+        self,
+        *packages: str,
+        index_url: str=None,
+        flags: str=""
+    ) -> Sandbox:
         """
         Install Python packages in the sandbox.
 
         Parameters:
             packages (list): Packages to install.
+            index_url (str | None): Index URL to search for package.
+            flags (str): Additional flags to pass to `pip`.
         """
-        command = PipInstallCommand(
+        command = PipInstallCommand(
+            packages=packages,
+            index_url=index_url,
+            flags=flags
+        )
         return Sandbox(commands=self.commands + [command])
 
     def apt_install (self, *packages: str) -> Sandbox:
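The `pip_install` sandbox command now records an optional `index_url` and free-form `flags`. A short sketch follows, assuming `Sandbox()` can be constructed with its defaults; the index URL, flags, and package names are placeholders:

```python
# Sketch of the extended pip_install signature added above; values are placeholders.
from fxn.sandbox import Sandbox

sandbox = (
    Sandbox()
    .pip_install(
        "torch", "torchvision",
        index_url="https://download.pytorch.org/whl/cpu",   # optional custom package index
        flags="--no-cache-dir"                              # extra flags forwarded to pip
    )
    .apt_install("libgl1")                                  # pre-existing command, unchanged in this diff
)
```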
{fxn-0.0.53 → fxn-0.0.55}/fxn/services/prediction.py
@@ -117,6 +117,21 @@ class PredictionService:
         with prediction:
             yield self.__to_prediction(tag, prediction)
 
+    def delete (self, tag: str) -> bool:
+        """
+        Delete a predictor that is loaded in memory.
+
+        Parameters:
+            tag (str): Predictor tag.
+
+        Returns:
+            bool: Whether the predictor was successfully deleted from memory.
+        """
+        if tag not in self.__cache:
+            return False
+        with self.__cache.pop(tag):
+            return True
+
     def __create_raw_prediction (
         self,
         tag: str,
@@ -245,7 +260,7 @@ class PredictionService:
                     tmp_file.write(chunk)
                     completed += len(chunk)
                     task.update(total=size, completed=completed)
-
+            Path(tmp_file.name).replace(path)
         return path
 
     def __get_resource_path (self, resource: PredictionResource) -> Path:
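The new `PredictionService.delete` method evicts a loaded predictor from the in-memory cache and reports whether anything was removed. A usage sketch, assuming `Function` picks up the access key from the `FXN_ACCESS_KEY` environment variable; the tag is a placeholder:

```python
# Sketch of the new delete method added above; the tag is a placeholder.
from fxn import Function

fxn = Function()                                  # assumes FXN_ACCESS_KEY is set in the environment
# ... after predictions have been made with this tag, evict the loaded predictor:
evicted = fxn.predictions.delete("@acme/classifier")
print(evicted)                                    # False if no predictor with this tag was loaded
```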
{fxn-0.0.53 → fxn-0.0.55}/fxn/types/__init__.py
@@ -5,5 +5,5 @@
 
 from .dtype import Dtype
 from .prediction import Acceleration, Prediction, PredictionResource
-from .predictor import
+from .predictor import EnumerationMember, Parameter, Predictor, PredictorAccess, PredictorStatus, Signature
 from .user import User
{fxn-0.0.53 → fxn-0.0.55}/fxn/types/predictor.py
@@ -3,28 +3,15 @@
 # Copyright © 2025 NatML Inc. All Rights Reserved.
 #
 
-from enum import Enum
 from pydantic import AliasChoices, BaseModel, ConfigDict, Field
-from typing import Any
+from typing import Any, Literal
 
 from .dtype import Dtype
 from .user import User
 
-
-    """
-    Predictor access mode.
-    """
-    Public = "PUBLIC"
-    Private = "PRIVATE"
+PredictorAccess = Literal["public", "private", "unlisted"]
 
-
-    """
-    Predictor status.
-    """
-    Compiling = "COMPILING"
-    Active = "ACTIVE"
-    Invalid = "INVALID"
-    Archived = "ARCHIVED"
+PredictorStatus = Literal["compiling", "active", "archived"]
 
 class EnumerationMember (BaseModel):
     """
@@ -79,7 +66,7 @@ class Predictor (BaseModel):
         owner (User): Predictor owner.
         name (str): Predictor name.
         status (PredictorStatus): Predictor status.
-        access (
+        access (PredictorAccess): Predictor access.
         signature (Signature): Predictor signature.
         created (str): Date created.
         description (str): Predictor description.
@@ -91,7 +78,7 @@ class Predictor (BaseModel):
     owner: User = Field(description="Predictor owner.")
     name: str = Field(description="Predictor name.")
     status: PredictorStatus = Field(description="Predictor status.")
-    access:
+    access: PredictorAccess = Field(description="Predictor access.")
     signature: Signature = Field(description="Predictor signature.")
     created: str = Field(description="Date created.")
     description: str | None = Field(default=None, description="Predictor description.")
fxn-0.0.53/fxn/cli/__init__.py
DELETED
@@ -1,53 +0,0 @@
-#
-# Function
-# Copyright © 2025 NatML Inc. All Rights Reserved.
-#
-
-import typer
-
-from ..logging import TracebackMarkupConsole
-from ..version import __version__
-
-from .auth import app as auth_app
-from .compile import compile_predictor
-from .misc import cli_options
-from .predictions import create_prediction
-from .predictors import archive_predictor, delete_predictor, retrieve_predictor
-from .sources import retrieve_source
-from ..beta.cli import llm_app
-
-# Define CLI
-typer.main.console_stderr = TracebackMarkupConsole()
-app = typer.Typer(
-    name=f"Function CLI {__version__}",
-    no_args_is_help=True,
-    pretty_exceptions_show_locals=False,
-    pretty_exceptions_short=True,
-    add_completion=False
-)
-
-# Add top level options
-app.callback()(cli_options)
-
-# Add subcommands
-app.add_typer(auth_app, name="auth", help="Login, logout, and check your authentication status.")
-app.add_typer(llm_app, name="llm", hidden=True, help="Work with large language models (LLMs).")
-
-# Add top-level commands
-app.command(
-    name="predict",
-    help="Make a prediction.",
-    context_settings={ "allow_extra_args": True, "ignore_unknown_options": True }
-)(create_prediction)
-app.command(
-    name="compile",
-    help="Create a predictor by compiling a Python function."
-)(compile_predictor)
-app.command(name="retrieve", help="Retrieve a predictor.")(retrieve_predictor)
-app.command(name="archive", help="Archive a predictor.")(archive_predictor)
-app.command(name="delete", help="Delete a predictor.")(delete_predictor)
-app.command(name="source", help="Retrieve the generated native code for a given predictor.")(retrieve_source)
-
-# Run
-if __name__ == "__main__":
-    app()
Binary files changed; no textual diff shown.