fxn 0.0.50__py3-none-any.whl → 0.0.52__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fxn/beta/__init__.py +2 -1
- fxn/beta/metadata.py +28 -9
- fxn/compile.py +23 -8
- fxn/version.py +1 -1
- {fxn-0.0.50.dist-info → fxn-0.0.52.dist-info}/METADATA +1 -1
- {fxn-0.0.50.dist-info → fxn-0.0.52.dist-info}/RECORD +10 -10
- {fxn-0.0.50.dist-info → fxn-0.0.52.dist-info}/WHEEL +1 -1
- {fxn-0.0.50.dist-info → fxn-0.0.52.dist-info}/entry_points.txt +0 -0
- {fxn-0.0.50.dist-info → fxn-0.0.52.dist-info}/licenses/LICENSE +0 -0
- {fxn-0.0.50.dist-info → fxn-0.0.52.dist-info}/top_level.txt +0 -0
fxn/beta/__init__.py
CHANGED
@@ -5,6 +5,7 @@
|
|
5
5
|
|
6
6
|
from .metadata import (
|
7
7
|
CoreMLInferenceMetadata, LiteRTInferenceMetadata, LlamaCppInferenceMetadata,
|
8
|
-
ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata
|
8
|
+
ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
|
9
|
+
QnnInferenceMetadata
|
9
10
|
)
|
10
11
|
from .remote import RemoteAcceleration
|
fxn/beta/metadata.py
CHANGED
@@ -30,8 +30,8 @@ class CoreMLInferenceMetadata (BaseModel):
|
|
30
30
|
Metadata required to lower a PyTorch model for inference on iOS, macOS, and visionOS with CoreML.
|
31
31
|
"""
|
32
32
|
kind: Literal["meta.inference.coreml"] = "meta.inference.coreml"
|
33
|
-
model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.")
|
34
|
-
model_args: list[object] = Field(description="Positional inputs to the model.")
|
33
|
+
model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
|
34
|
+
model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
|
35
35
|
model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
|
36
36
|
|
37
37
|
class ONNXInferenceMetadata (BaseModel):
|
@@ -39,8 +39,8 @@ class ONNXInferenceMetadata (BaseModel):
|
|
39
39
|
Metadata required to lower a PyTorch model for inference.
|
40
40
|
"""
|
41
41
|
kind: Literal["meta.inference.onnx"] = "meta.inference.onnx"
|
42
|
-
model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.")
|
43
|
-
model_args: list[object] = Field(description="Positional inputs to the model.")
|
42
|
+
model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
|
43
|
+
model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
|
44
44
|
model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
|
45
45
|
|
46
46
|
class ONNXRuntimeInferenceSessionMetadata (BaseModel):
|
@@ -48,8 +48,8 @@ class ONNXRuntimeInferenceSessionMetadata (BaseModel):
|
|
48
48
|
Metadata required to lower an ONNXRuntime `InferenceSession` for inference.
|
49
49
|
"""
|
50
50
|
kind: Literal["meta.inference.onnxruntime"] = "meta.inference.onnxruntime"
|
51
|
-
session: Annotated[object, BeforeValidator(_validate_ort_inference_session)] = Field(description="ONNXRuntime inference session to apply metadata to.")
|
52
|
-
model_path: Path = Field(description="ONNX model path. The model must exist at this path in the compiler sandbox.")
|
51
|
+
session: Annotated[object, BeforeValidator(_validate_ort_inference_session)] = Field(description="ONNXRuntime inference session to apply metadata to.", exclude=True)
|
52
|
+
model_path: Path = Field(description="ONNX model path. The model must exist at this path in the compiler sandbox.", exclude=True)
|
53
53
|
model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
|
54
54
|
|
55
55
|
class LiteRTInferenceMetadata (BaseModel):
|
@@ -57,8 +57,27 @@ class LiteRTInferenceMetadata (BaseModel):
|
|
57
57
|
Metadata required to lower PyTorch model for inference with LiteRT (fka TensorFlow Lite).
|
58
58
|
"""
|
59
59
|
kind: Literal["meta.inference.litert"] = "meta.inference.litert"
|
60
|
-
model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.")
|
61
|
-
model_args: list[object] = Field(description="Positional inputs to the model.")
|
60
|
+
model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
|
61
|
+
model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
|
62
|
+
model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
|
63
|
+
|
64
|
+
class OpenVINOInferenceMetadata (BaseModel):
|
65
|
+
"""
|
66
|
+
Metadata required to lower a PyTorch model for inference with Intel OpenVINO.
|
67
|
+
"""
|
68
|
+
kind: Literal["meta.inference.openvino"] = "meta.inference.openvino"
|
69
|
+
model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
|
70
|
+
model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
|
71
|
+
model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
|
72
|
+
|
73
|
+
class QnnInferenceMetadata (BaseModel):
|
74
|
+
"""
|
75
|
+
Metadata required to lower a PyTorch model for inference on Qualcomm accelerators with QNN SDK.
|
76
|
+
"""
|
77
|
+
kind: Literal["meta.inference.qnn"] = "meta.inference.qnn"
|
78
|
+
model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
|
79
|
+
model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
|
80
|
+
backend: Literal["cpu", "gpu"] = Field(default="cpu", description="QNN backend to execute the model.", exclude=True) # CHECK # Add `htp`
|
62
81
|
model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
|
63
82
|
|
64
83
|
class LlamaCppInferenceMetadata (BaseModel): # INCOMPLETE
|
@@ -66,5 +85,5 @@ class LlamaCppInferenceMetadata (BaseModel): # INCOMPLETE
|
|
66
85
|
Metadata required to lower a GGUF model for LLM inference.
|
67
86
|
"""
|
68
87
|
kind: Literal["meta.inference.gguf"] = "meta.inference.gguf"
|
69
|
-
model_path: Path = Field(description="GGUF model path. The model must exist at this path in the compiler sandbox.")
|
88
|
+
model_path: Path = Field(description="GGUF model path. The model must exist at this path in the compiler sandbox.", exclude=True)
|
70
89
|
model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
|
fxn/compile.py
CHANGED
@@ -9,25 +9,39 @@ from inspect import isasyncgenfunction, iscoroutinefunction
|
|
9
9
|
from pathlib import Path
|
10
10
|
from pydantic import BaseModel, ConfigDict, Field
|
11
11
|
from types import ModuleType
|
12
|
-
from typing import Literal
|
12
|
+
from typing import Any, Callable, Literal, ParamSpec, TypeVar, cast
|
13
13
|
|
14
14
|
from .beta import (
|
15
15
|
CoreMLInferenceMetadata, LiteRTInferenceMetadata, LlamaCppInferenceMetadata,
|
16
|
-
ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata
|
16
|
+
ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
|
17
|
+
QnnInferenceMetadata
|
17
18
|
)
|
18
19
|
from .sandbox import Sandbox
|
19
20
|
from .types import AccessMode
|
20
21
|
|
21
|
-
CompileTarget = Literal[
|
22
|
+
CompileTarget = Literal[
|
23
|
+
"android",
|
24
|
+
"ios",
|
25
|
+
"linux",
|
26
|
+
"macos",
|
27
|
+
"visionos",
|
28
|
+
"wasm",
|
29
|
+
"windows"
|
30
|
+
]
|
22
31
|
|
23
32
|
CompileMetadata = (
|
24
33
|
CoreMLInferenceMetadata |
|
25
34
|
LiteRTInferenceMetadata |
|
26
35
|
LlamaCppInferenceMetadata |
|
27
36
|
ONNXInferenceMetadata |
|
28
|
-
ONNXRuntimeInferenceSessionMetadata
|
37
|
+
ONNXRuntimeInferenceSessionMetadata |
|
38
|
+
OpenVINOInferenceMetadata |
|
39
|
+
QnnInferenceMetadata
|
29
40
|
)
|
30
41
|
|
42
|
+
P = ParamSpec("P")
|
43
|
+
R = TypeVar("R")
|
44
|
+
|
31
45
|
class PredictorSpec (BaseModel):
|
32
46
|
"""
|
33
47
|
Descriptor of a predictor to be compiled.
|
@@ -36,6 +50,7 @@ class PredictorSpec (BaseModel):
|
|
36
50
|
description: str = Field(description="Predictor description. MUST be less than 100 characters long.", min_length=4, max_length=100)
|
37
51
|
sandbox: Sandbox = Field(description="Sandbox to compile the function.")
|
38
52
|
targets: list[str] | None = Field(description="Targets to compile this predictor for. Pass `None` to compile for our default targets.")
|
53
|
+
metadata: list[object] = Field(default=[], description="Metadata to use while compiling the function.")
|
39
54
|
access: AccessMode = Field(description="Predictor access.")
|
40
55
|
card: str | None = Field(default=None, description="Predictor card (markdown).")
|
41
56
|
media: str | None = Field(default=None, description="Predictor media URL.")
|
@@ -49,13 +64,13 @@ def compile (
|
|
49
64
|
sandbox: Sandbox=None,
|
50
65
|
trace_modules: list[ModuleType]=[],
|
51
66
|
targets: list[CompileTarget]=None,
|
52
|
-
access: AccessMode=AccessMode.Private,
|
53
67
|
metadata: list[CompileMetadata]=[],
|
68
|
+
access: AccessMode=AccessMode.Private,
|
54
69
|
card: str | Path=None,
|
55
70
|
media: Path=None,
|
56
71
|
license: str=None,
|
57
72
|
**kwargs
|
58
|
-
):
|
73
|
+
) -> Callable[[Callable[P, R]], Callable[P, R]]:
|
59
74
|
"""
|
60
75
|
Create a predictor by compiling a stateless function.
|
61
76
|
|
@@ -65,8 +80,8 @@ def compile (
|
|
65
80
|
sandbox (Sandbox): Sandbox to compile the function.
|
66
81
|
trace_modules (list): Modules to trace and compile.
|
67
82
|
targets (list): Targets to compile this predictor for. Pass `None` to compile for our default targets.
|
68
|
-
access (AccessMode): Predictor access.
|
69
83
|
metadata (list): Metadata to use while compiling the function.
|
84
|
+
access (AccessMode): Predictor access.
|
70
85
|
card (str | Path): Predictor card markdown string or path to card.
|
71
86
|
media (Path): Predictor thumbnail image (jpeg or png) path.
|
72
87
|
license (str): Predictor license URL. This is required for public predictors.
|
@@ -96,5 +111,5 @@ def compile (
|
|
96
111
|
def wrapper (*args, **kwargs):
|
97
112
|
return func(*args, **kwargs)
|
98
113
|
wrapper.__predictor_spec = spec
|
99
|
-
return wrapper
|
114
|
+
return cast(Callable[P, R], wrapper)
|
100
115
|
return decorator
|
fxn/version.py
CHANGED
@@ -1,13 +1,13 @@
|
|
1
1
|
fxn/__init__.py,sha256=gnJK7iOmMVWFhluW9bOvTNxJbpT-GwzDJTMmjA_XxOE,284
|
2
2
|
fxn/client.py,sha256=Deje8eiS1VOHX85tQnV34viv2CPVx2ljwHSbyVB5Z1o,3790
|
3
|
-
fxn/compile.py,sha256=
|
3
|
+
fxn/compile.py,sha256=flfWCztkPl4mj8HHNJ5hagiw6GPXwEgCzML5DF3jE3I,4371
|
4
4
|
fxn/function.py,sha256=XeEuALkbVhkvwEBUfP0A2fu3tdimwHemoR17oomhzc8,1407
|
5
5
|
fxn/logging.py,sha256=MsTSf0GZxrHNDwVAXDOh8_zRUg9hkeZ8DfhFUJs7D8A,7250
|
6
6
|
fxn/sandbox.py,sha256=50yY2GDdkAFl-6pXTleaD1LXYM6-pJ3C1epKsr0xdrM,7313
|
7
|
-
fxn/version.py,sha256=
|
8
|
-
fxn/beta/__init__.py,sha256=
|
7
|
+
fxn/version.py,sha256=EVnLH1XFtA_XTvNVV5KvP6eyRyNaYwO1HmlkKenTsNw,95
|
8
|
+
fxn/beta/__init__.py,sha256=P8x10VoMJw58mhz-ZfnHECPeMq7AELBz6mQN4a-8WAc,334
|
9
9
|
fxn/beta/client.py,sha256=0lfwQPcB9ToIJC7AcCXO6DlJKkmId8EChhd9bk29GGE,2611
|
10
|
-
fxn/beta/metadata.py,sha256=
|
10
|
+
fxn/beta/metadata.py,sha256=z3ykTsbMsmPEtK8HNpZcFCeJfszZR7f7lUQqppnz7_w,4919
|
11
11
|
fxn/beta/prediction.py,sha256=9DTBahNF6m0TicLab2o9e8IKpiSV6K7cUSTYaFju0ZU,356
|
12
12
|
fxn/beta/remote.py,sha256=psPNcGFQKMGHAJG4NRDIQ2trStvTj2NOZeQqVI-e29Q,7529
|
13
13
|
fxn/c/__init__.py,sha256=NMIduqO_MYtI9jVCu6ZxvbBtYQXoQyNEWblNy3m2UPY,313
|
@@ -41,9 +41,9 @@ fxn/types/dtype.py,sha256=71Tuu4IydmELcBcSBbmWswhCE-7WqBSQ4VkETsFRzjA,617
|
|
41
41
|
fxn/types/prediction.py,sha256=BdLTxnKiSFbz5warX8g_Z4DedNxXK3gaNjSKR2FP8tA,2051
|
42
42
|
fxn/types/predictor.py,sha256=KRGZEuDt7WPMCyRcZvQq4y2FMocfVrLEUNJCJgfDY9Y,4000
|
43
43
|
fxn/types/user.py,sha256=Z44TwEocyxSrfKyzcNfmAXUrpX_Ry8fJ7MffSxRn4oU,1071
|
44
|
-
fxn-0.0.
|
45
|
-
fxn-0.0.
|
46
|
-
fxn-0.0.
|
47
|
-
fxn-0.0.
|
48
|
-
fxn-0.0.
|
49
|
-
fxn-0.0.
|
44
|
+
fxn-0.0.52.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
|
45
|
+
fxn-0.0.52.dist-info/METADATA,sha256=du31QRBmBTzGvzhr3Nu23Om-JwMg7GKQLh6nJw3LOfA,16136
|
46
|
+
fxn-0.0.52.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
|
47
|
+
fxn-0.0.52.dist-info/entry_points.txt,sha256=O_AwD5dYaeB-YT1F9hPAPuDYCkw_W0tdNGYbc5RVR2k,45
|
48
|
+
fxn-0.0.52.dist-info/top_level.txt,sha256=1ULIEGrnMlhId8nYAkjmRn9g3KEFuHKboq193SEKQkA,4
|
49
|
+
fxn-0.0.52.dist-info/RECORD,,
|
File without changes
|
File without changes
|
File without changes
|