fxn 0.0.50.tar.gz → 0.0.52.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. {fxn-0.0.50 → fxn-0.0.52}/PKG-INFO +1 -1
  2. {fxn-0.0.50 → fxn-0.0.52}/fxn/beta/__init__.py +2 -1
  3. {fxn-0.0.50 → fxn-0.0.52}/fxn/beta/metadata.py +28 -9
  4. {fxn-0.0.50 → fxn-0.0.52}/fxn/compile.py +23 -8
  5. {fxn-0.0.50 → fxn-0.0.52}/fxn/version.py +1 -1
  6. {fxn-0.0.50 → fxn-0.0.52}/fxn.egg-info/PKG-INFO +1 -1
  7. {fxn-0.0.50 → fxn-0.0.52}/LICENSE +0 -0
  8. {fxn-0.0.50 → fxn-0.0.52}/README.md +0 -0
  9. {fxn-0.0.50 → fxn-0.0.52}/fxn/__init__.py +0 -0
  10. {fxn-0.0.50 → fxn-0.0.52}/fxn/beta/client.py +0 -0
  11. {fxn-0.0.50 → fxn-0.0.52}/fxn/beta/prediction.py +0 -0
  12. {fxn-0.0.50 → fxn-0.0.52}/fxn/beta/remote.py +0 -0
  13. {fxn-0.0.50 → fxn-0.0.52}/fxn/c/__init__.py +0 -0
  14. {fxn-0.0.50 → fxn-0.0.52}/fxn/c/configuration.py +0 -0
  15. {fxn-0.0.50 → fxn-0.0.52}/fxn/c/fxnc.py +0 -0
  16. {fxn-0.0.50 → fxn-0.0.52}/fxn/c/map.py +0 -0
  17. {fxn-0.0.50 → fxn-0.0.52}/fxn/c/prediction.py +0 -0
  18. {fxn-0.0.50 → fxn-0.0.52}/fxn/c/predictor.py +0 -0
  19. {fxn-0.0.50 → fxn-0.0.52}/fxn/c/stream.py +0 -0
  20. {fxn-0.0.50 → fxn-0.0.52}/fxn/c/value.py +0 -0
  21. {fxn-0.0.50 → fxn-0.0.52}/fxn/cli/__init__.py +0 -0
  22. {fxn-0.0.50 → fxn-0.0.52}/fxn/cli/auth.py +0 -0
  23. {fxn-0.0.50 → fxn-0.0.52}/fxn/cli/compile.py +0 -0
  24. {fxn-0.0.50 → fxn-0.0.52}/fxn/cli/misc.py +0 -0
  25. {fxn-0.0.50 → fxn-0.0.52}/fxn/cli/predictions.py +0 -0
  26. {fxn-0.0.50 → fxn-0.0.52}/fxn/cli/predictors.py +0 -0
  27. {fxn-0.0.50 → fxn-0.0.52}/fxn/cli/sources.py +0 -0
  28. {fxn-0.0.50 → fxn-0.0.52}/fxn/client.py +0 -0
  29. {fxn-0.0.50 → fxn-0.0.52}/fxn/function.py +0 -0
  30. {fxn-0.0.50 → fxn-0.0.52}/fxn/lib/__init__.py +0 -0
  31. {fxn-0.0.50 → fxn-0.0.52}/fxn/lib/linux/arm64/libFunction.so +0 -0
  32. {fxn-0.0.50 → fxn-0.0.52}/fxn/lib/linux/x86_64/libFunction.so +0 -0
  33. {fxn-0.0.50 → fxn-0.0.52}/fxn/lib/macos/arm64/Function.dylib +0 -0
  34. {fxn-0.0.50 → fxn-0.0.52}/fxn/lib/macos/x86_64/Function.dylib +0 -0
  35. {fxn-0.0.50 → fxn-0.0.52}/fxn/lib/windows/arm64/Function.dll +0 -0
  36. {fxn-0.0.50 → fxn-0.0.52}/fxn/lib/windows/x86_64/Function.dll +0 -0
  37. {fxn-0.0.50 → fxn-0.0.52}/fxn/logging.py +0 -0
  38. {fxn-0.0.50 → fxn-0.0.52}/fxn/sandbox.py +0 -0
  39. {fxn-0.0.50 → fxn-0.0.52}/fxn/services/__init__.py +0 -0
  40. {fxn-0.0.50 → fxn-0.0.52}/fxn/services/prediction.py +0 -0
  41. {fxn-0.0.50 → fxn-0.0.52}/fxn/services/predictor.py +0 -0
  42. {fxn-0.0.50 → fxn-0.0.52}/fxn/services/user.py +0 -0
  43. {fxn-0.0.50 → fxn-0.0.52}/fxn/types/__init__.py +0 -0
  44. {fxn-0.0.50 → fxn-0.0.52}/fxn/types/dtype.py +0 -0
  45. {fxn-0.0.50 → fxn-0.0.52}/fxn/types/prediction.py +0 -0
  46. {fxn-0.0.50 → fxn-0.0.52}/fxn/types/predictor.py +0 -0
  47. {fxn-0.0.50 → fxn-0.0.52}/fxn/types/user.py +0 -0
  48. {fxn-0.0.50 → fxn-0.0.52}/fxn.egg-info/SOURCES.txt +0 -0
  49. {fxn-0.0.50 → fxn-0.0.52}/fxn.egg-info/dependency_links.txt +0 -0
  50. {fxn-0.0.50 → fxn-0.0.52}/fxn.egg-info/entry_points.txt +0 -0
  51. {fxn-0.0.50 → fxn-0.0.52}/fxn.egg-info/requires.txt +0 -0
  52. {fxn-0.0.50 → fxn-0.0.52}/fxn.egg-info/top_level.txt +0 -0
  53. {fxn-0.0.50 → fxn-0.0.52}/pyproject.toml +0 -0
  54. {fxn-0.0.50 → fxn-0.0.52}/setup.cfg +0 -0
{fxn-0.0.50 → fxn-0.0.52}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fxn
-Version: 0.0.50
+Version: 0.0.52
 Summary: Run prediction functions locally in Python. Register at https://fxn.ai.
 Author-email: "NatML Inc." <hi@fxn.ai>
 License: Apache License
{fxn-0.0.50 → fxn-0.0.52}/fxn/beta/__init__.py
@@ -5,6 +5,7 @@
 
 from .metadata import (
     CoreMLInferenceMetadata, LiteRTInferenceMetadata, LlamaCppInferenceMetadata,
-    ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata
+    ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
+    QnnInferenceMetadata
 )
 from .remote import RemoteAcceleration
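With the export list updated, both new metadata classes are importable from `fxn.beta` alongside the existing ones. A one-line sketch, assuming fxn 0.0.52 is installed:

# New in 0.0.52: OpenVINO and QNN inference metadata exports
from fxn.beta import OpenVINOInferenceMetadata, QnnInferenceMetadata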
{fxn-0.0.50 → fxn-0.0.52}/fxn/beta/metadata.py
@@ -30,8 +30,8 @@ class CoreMLInferenceMetadata (BaseModel):
     Metadata required to lower a PyTorch model for inference on iOS, macOS, and visionOS with CoreML.
     """
     kind: Literal["meta.inference.coreml"] = "meta.inference.coreml"
-    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.")
-    model_args: list[object] = Field(description="Positional inputs to the model.")
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
+    model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
 
 class ONNXInferenceMetadata (BaseModel):
@@ -39,8 +39,8 @@ class ONNXInferenceMetadata (BaseModel):
     Metadata required to lower a PyTorch model for inference.
     """
     kind: Literal["meta.inference.onnx"] = "meta.inference.onnx"
-    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.")
-    model_args: list[object] = Field(description="Positional inputs to the model.")
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
+    model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
 
 class ONNXRuntimeInferenceSessionMetadata (BaseModel):
@@ -48,8 +48,8 @@ class ONNXRuntimeInferenceSessionMetadata (BaseModel):
     Metadata required to lower an ONNXRuntime `InferenceSession` for inference.
     """
     kind: Literal["meta.inference.onnxruntime"] = "meta.inference.onnxruntime"
-    session: Annotated[object, BeforeValidator(_validate_ort_inference_session)] = Field(description="ONNXRuntime inference session to apply metadata to.")
-    model_path: Path = Field(description="ONNX model path. The model must exist at this path in the compiler sandbox.")
+    session: Annotated[object, BeforeValidator(_validate_ort_inference_session)] = Field(description="ONNXRuntime inference session to apply metadata to.", exclude=True)
+    model_path: Path = Field(description="ONNX model path. The model must exist at this path in the compiler sandbox.", exclude=True)
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
 
 class LiteRTInferenceMetadata (BaseModel):
@@ -57,8 +57,27 @@ class LiteRTInferenceMetadata (BaseModel):
     Metadata required to lower PyTorch model for inference with LiteRT (fka TensorFlow Lite).
     """
     kind: Literal["meta.inference.litert"] = "meta.inference.litert"
-    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.")
-    model_args: list[object] = Field(description="Positional inputs to the model.")
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
+    model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
+    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
+
+class OpenVINOInferenceMetadata (BaseModel):
+    """
+    Metadata required to lower PyTorch model for interence with Intel OpenVINO.
+    """
+    kind: Literal["meta.inference.openvino"] = "meta.inference.openvino"
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
+    model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
+    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
+
+class QnnInferenceMetadata (BaseModel):
+    """
+    Metadata required to lower a PyTorch model for inference on Qualcomm accelerators with QNN SDK.
+    """
+    kind: Literal["meta.inference.qnn"] = "meta.inference.qnn"
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
+    model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
+    backend: Literal["cpu", "gpu"] = Field(default="cpu", description="QNN backend to execute the model.", exclude=True) # CHECK # Add `htp`
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
 
 class LlamaCppInferenceMetadata (BaseModel): # INCOMPLETE
@@ -66,5 +85,5 @@ class LlamaCppInferenceMetadata (BaseModel): # INCOMPLETE
     Metadata required to lower a GGUF model for LLM inference.
     """
     kind: Literal["meta.inference.gguf"] = "meta.inference.gguf"
-    model_path: Path = Field(description="GGUF model path. The model must exist at this path in the compiler sandbox.")
+    model_path: Path = Field(description="GGUF model path. The model must exist at this path in the compiler sandbox.", exclude=True)
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
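The common thread across these hunks is `exclude=True` on every field that holds a live object or a sandbox-local path. In Pydantic v2, which these classes are built on, an excluded field is still validated and kept on the instance but omitted from serialization, so a metadata object can carry a live PyTorch module or ONNXRuntime session without trying to serialize it. A minimal illustrative sketch (the `ExampleMetadata` class is hypothetical, not from this package):

from pydantic import BaseModel, ConfigDict, Field

class ExampleMetadata(BaseModel):
    # Hypothetical stand-in for the metadata classes above.
    kind: str = "meta.inference.example"
    # Excluded fields are validated and accessible, but never serialized.
    model: object = Field(description="Live object to keep out of serialization.", exclude=True)
    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)

meta = ExampleMetadata(model=object())
print(meta.model)        # the live object is still accessible in memory
print(meta.model_dump()) # {'kind': 'meta.inference.example'}; `model` is omitted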
{fxn-0.0.50 → fxn-0.0.52}/fxn/compile.py
@@ -9,25 +9,39 @@ from inspect import isasyncgenfunction, iscoroutinefunction
 from pathlib import Path
 from pydantic import BaseModel, ConfigDict, Field
 from types import ModuleType
-from typing import Literal
+from typing import Any, Callable, Literal, ParamSpec, TypeVar, cast
 
 from .beta import (
     CoreMLInferenceMetadata, LiteRTInferenceMetadata, LlamaCppInferenceMetadata,
-    ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata
+    ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
+    QnnInferenceMetadata
 )
 from .sandbox import Sandbox
 from .types import AccessMode
 
-CompileTarget = Literal["android", "ios", "linux", "macos", "visionos", "wasm", "windows"]
+CompileTarget = Literal[
+    "android",
+    "ios",
+    "linux",
+    "macos",
+    "visionos",
+    "wasm",
+    "windows"
+]
 
 CompileMetadata = (
     CoreMLInferenceMetadata |
     LiteRTInferenceMetadata |
     LlamaCppInferenceMetadata |
     ONNXInferenceMetadata |
-    ONNXRuntimeInferenceSessionMetadata
+    ONNXRuntimeInferenceSessionMetadata |
+    OpenVINOInferenceMetadata |
+    QnnInferenceMetadata
 )
 
+P = ParamSpec("P")
+R = TypeVar("R")
+
 class PredictorSpec (BaseModel):
     """
     Descriptor of a predictor to be compiled.
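The new `P`/`R` pair is the standard typing recipe for signature-preserving decorators: `ParamSpec` captures the wrapped function's parameters and `TypeVar` its return type, so `compile`'s new `Callable[[Callable[P, R]], Callable[P, R]]` annotation (see the hunks below) tells type checkers that decorating a function leaves its call signature unchanged. A self-contained sketch of the pattern (the `tagged` factory is hypothetical, merely shaped like `compile`):

from functools import wraps
from typing import Callable, ParamSpec, TypeVar, cast

P = ParamSpec("P")
R = TypeVar("R")

def tagged(tag: str) -> Callable[[Callable[P, R]], Callable[P, R]]:
    # Hypothetical decorator factory illustrating the typing pattern.
    def decorator(func: Callable[P, R]) -> Callable[P, R]:
        @wraps(func)
        def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
            return func(*args, **kwargs)
        wrapper.__tag = tag  # attach metadata, as `compile` attaches `__predictor_spec`
        return cast(Callable[P, R], wrapper)
    return decorator

@tagged("demo")
def add(a: int, b: int) -> int:
    return a + b

add(1, 2)  # type checkers still see `add` as (int, int) -> int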
@@ -36,6 +50,7 @@ class PredictorSpec (BaseModel):
     description: str = Field(description="Predictor description. MUST be less than 100 characters long.", min_length=4, max_length=100)
     sandbox: Sandbox = Field(description="Sandbox to compile the function.")
     targets: list[str] | None = Field(description="Targets to compile this predictor for. Pass `None` to compile for our default targets.")
+    metadata: list[object] = Field(default=[], description="Metadata to use while compiling the function.")
     access: AccessMode = Field(description="Predictor access.")
     card: str | None = Field(default=None, description="Predictor card (markdown).")
     media: str | None = Field(default=None, description="Predictor media URL.")
@@ -49,13 +64,13 @@ def compile (
     sandbox: Sandbox=None,
     trace_modules: list[ModuleType]=[],
     targets: list[CompileTarget]=None,
-    access: AccessMode=AccessMode.Private,
     metadata: list[CompileMetadata]=[],
+    access: AccessMode=AccessMode.Private,
     card: str | Path=None,
     media: Path=None,
     license: str=None,
     **kwargs
-):
+) -> Callable[[Callable[P, R]], Callable[P, R]]:
     """
     Create a predictor by compiling a stateless function.
 
@@ -65,8 +80,8 @@ def compile (
         sandbox (Sandbox): Sandbox to compile the function.
         trace_modules (list): Modules to trace and compile.
         targets (list): Targets to compile this predictor for. Pass `None` to compile for our default targets.
-        access (AccessMode): Predictor access.
         metadata (list): Metadata to use while compiling the function.
+        access (AccessMode): Predictor access.
         card (str | Path): Predictor card markdown string or path to card.
         media (Path): Predictor thumbnail image (jpeg or png) path.
         license (str): Predictor license URL. This is required for public predictors.
@@ -96,5 +111,5 @@ def compile (
         def wrapper (*args, **kwargs):
             return func(*args, **kwargs)
         wrapper.__predictor_spec = spec
-        return wrapper
+        return cast(Callable[P, R], wrapper)
     return decorator
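Taken together: `metadata` now precedes `access` in both the signature and the docstring, the `CompileMetadata` union accepts the two new metadata types, and the `cast` plus the new return annotation keep the decorated function's static signature intact. A hedged end-to-end sketch (the `tag` parameter is an assumption, since the hunk elides the parameters above `sandbox`; the model and argument values are hypothetical):

import torch
from fxn.beta import QnnInferenceMetadata
from fxn.compile import compile
from fxn.sandbox import Sandbox

model = torch.nn.Linear(4, 2)

@compile(
    tag="@example/qnn-predictor",  # assumed parameter, not shown in the hunks above
    description="Toy linear model lowered for Qualcomm accelerators.",
    sandbox=Sandbox(),
    # New in 0.0.52: QNN metadata, passed via the repositioned `metadata` argument.
    metadata=[QnnInferenceMetadata(model=model, model_args=[torch.randn(1, 4)], backend="cpu")],
)
def predict(x: list[float]) -> list[float]:
    # Type checkers still see `predict` as (list[float]) -> list[float],
    # thanks to the ParamSpec-based return annotation on `compile`.
    return model(torch.tensor(x)).tolist()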
{fxn-0.0.50 → fxn-0.0.52}/fxn/version.py
@@ -3,4 +3,4 @@
 # Copyright © 2025 NatML Inc. All Rights Reserved.
 #
 
-__version__ = "0.0.50"
+__version__ = "0.0.52"
{fxn-0.0.50 → fxn-0.0.52}/fxn.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fxn
-Version: 0.0.50
+Version: 0.0.52
 Summary: Run prediction functions locally in Python. Register at https://fxn.ai.
 Author-email: "NatML Inc." <hi@fxn.ai>
 License: Apache License