fxn 0.0.52.tar.gz → 0.0.54.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. {fxn-0.0.52 → fxn-0.0.54}/PKG-INFO +1 -1
  2. fxn-0.0.54/fxn/beta/__init__.py +13 -0
  3. fxn-0.0.54/fxn/beta/cli/__init__.py +7 -0
  4. fxn-0.0.54/fxn/beta/cli/llm.py +22 -0
  5. fxn-0.0.54/fxn/beta/cli/mcp.py +16 -0
  6. {fxn-0.0.52 → fxn-0.0.54}/fxn/beta/client.py +1 -2
  7. fxn-0.0.54/fxn/beta/llm/__init__.py +5 -0
  8. fxn-0.0.54/fxn/beta/llm/server.py +5 -0
  9. fxn-0.0.54/fxn/beta/metadata.py +189 -0
  10. fxn-0.0.54/fxn/beta/services/__init__.py +7 -0
  11. {fxn-0.0.52/fxn/beta → fxn-0.0.54/fxn/beta/services}/prediction.py +1 -1
  12. {fxn-0.0.52/fxn/beta → fxn-0.0.54/fxn/beta/services}/remote.py +4 -4
  13. fxn-0.0.54/fxn/cli/__init__.py +91 -0
  14. {fxn-0.0.52 → fxn-0.0.54}/fxn/cli/compile.py +2 -0
  15. {fxn-0.0.52 → fxn-0.0.54}/fxn/cli/sources.py +1 -1
  16. {fxn-0.0.52 → fxn-0.0.54}/fxn/compile.py +7 -7
  17. fxn-0.0.54/fxn/lib/linux/arm64/libFunction.so +0 -0
  18. fxn-0.0.54/fxn/lib/linux/x86_64/libFunction.so +0 -0
  19. fxn-0.0.54/fxn/lib/macos/arm64/Function.dylib +0 -0
  20. fxn-0.0.54/fxn/lib/macos/x86_64/Function.dylib +0 -0
  21. {fxn-0.0.52 → fxn-0.0.54}/fxn/lib/windows/arm64/Function.dll +0 -0
  22. fxn-0.0.54/fxn/lib/windows/x86_64/Function.dll +0 -0
  23. {fxn-0.0.52 → fxn-0.0.54}/fxn/sandbox.py +15 -2
  24. {fxn-0.0.52 → fxn-0.0.54}/fxn/services/prediction.py +0 -12
  25. {fxn-0.0.52 → fxn-0.0.54}/fxn/types/__init__.py +1 -1
  26. {fxn-0.0.52 → fxn-0.0.54}/fxn/types/predictor.py +5 -18
  27. {fxn-0.0.52 → fxn-0.0.54}/fxn/version.py +1 -1
  28. {fxn-0.0.52 → fxn-0.0.54}/fxn.egg-info/PKG-INFO +1 -1
  29. {fxn-0.0.52 → fxn-0.0.54}/fxn.egg-info/SOURCES.txt +8 -2
  30. fxn-0.0.52/fxn/beta/__init__.py +0 -11
  31. fxn-0.0.52/fxn/beta/metadata.py +0 -89
  32. fxn-0.0.52/fxn/cli/__init__.py +0 -51
  33. fxn-0.0.52/fxn/lib/linux/arm64/libFunction.so +0 -0
  34. fxn-0.0.52/fxn/lib/linux/x86_64/libFunction.so +0 -0
  35. fxn-0.0.52/fxn/lib/macos/arm64/Function.dylib +0 -0
  36. fxn-0.0.52/fxn/lib/macos/x86_64/Function.dylib +0 -0
  37. fxn-0.0.52/fxn/lib/windows/x86_64/Function.dll +0 -0
  38. {fxn-0.0.52 → fxn-0.0.54}/LICENSE +0 -0
  39. {fxn-0.0.52 → fxn-0.0.54}/README.md +0 -0
  40. {fxn-0.0.52 → fxn-0.0.54}/fxn/__init__.py +0 -0
  41. {fxn-0.0.52 → fxn-0.0.54}/fxn/c/__init__.py +0 -0
  42. {fxn-0.0.52 → fxn-0.0.54}/fxn/c/configuration.py +0 -0
  43. {fxn-0.0.52 → fxn-0.0.54}/fxn/c/fxnc.py +0 -0
  44. {fxn-0.0.52 → fxn-0.0.54}/fxn/c/map.py +0 -0
  45. {fxn-0.0.52 → fxn-0.0.54}/fxn/c/prediction.py +0 -0
  46. {fxn-0.0.52 → fxn-0.0.54}/fxn/c/predictor.py +0 -0
  47. {fxn-0.0.52 → fxn-0.0.54}/fxn/c/stream.py +0 -0
  48. {fxn-0.0.52 → fxn-0.0.54}/fxn/c/value.py +0 -0
  49. {fxn-0.0.52 → fxn-0.0.54}/fxn/cli/auth.py +0 -0
  50. {fxn-0.0.52 → fxn-0.0.54}/fxn/cli/misc.py +0 -0
  51. {fxn-0.0.52 → fxn-0.0.54}/fxn/cli/predictions.py +0 -0
  52. {fxn-0.0.52 → fxn-0.0.54}/fxn/cli/predictors.py +0 -0
  53. {fxn-0.0.52 → fxn-0.0.54}/fxn/client.py +0 -0
  54. {fxn-0.0.52 → fxn-0.0.54}/fxn/function.py +0 -0
  55. {fxn-0.0.52 → fxn-0.0.54}/fxn/lib/__init__.py +0 -0
  56. {fxn-0.0.52 → fxn-0.0.54}/fxn/logging.py +0 -0
  57. {fxn-0.0.52 → fxn-0.0.54}/fxn/services/__init__.py +0 -0
  58. {fxn-0.0.52 → fxn-0.0.54}/fxn/services/predictor.py +0 -0
  59. {fxn-0.0.52 → fxn-0.0.54}/fxn/services/user.py +0 -0
  60. {fxn-0.0.52 → fxn-0.0.54}/fxn/types/dtype.py +0 -0
  61. {fxn-0.0.52 → fxn-0.0.54}/fxn/types/prediction.py +0 -0
  62. {fxn-0.0.52 → fxn-0.0.54}/fxn/types/user.py +0 -0
  63. {fxn-0.0.52 → fxn-0.0.54}/fxn.egg-info/dependency_links.txt +0 -0
  64. {fxn-0.0.52 → fxn-0.0.54}/fxn.egg-info/entry_points.txt +0 -0
  65. {fxn-0.0.52 → fxn-0.0.54}/fxn.egg-info/requires.txt +0 -0
  66. {fxn-0.0.52 → fxn-0.0.54}/fxn.egg-info/top_level.txt +0 -0
  67. {fxn-0.0.52 → fxn-0.0.54}/pyproject.toml +0 -0
  68. {fxn-0.0.52 → fxn-0.0.54}/setup.cfg +0 -0

{fxn-0.0.52 → fxn-0.0.54}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fxn
-Version: 0.0.52
+Version: 0.0.54
 Summary: Run prediction functions locally in Python. Register at https://fxn.ai.
 Author-email: "NatML Inc." <hi@fxn.ai>
 License: Apache License

fxn-0.0.54/fxn/beta/__init__.py
@@ -0,0 +1,13 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+
+from .metadata import (
+    CoreMLInferenceMetadata, LiteRTInferenceMetadata, LlamaCppInferenceMetadata,
+    OnnxInferenceMetadata, OnnxRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
+    QnnInferenceMetadata, QnnInferenceBackend, QnnInferenceQuantization,
+    # Deprecated
+    ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata
+)
+from .services import RemoteAcceleration
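
Note: the import list above keeps the old ONNX* names as deprecated aliases of the new Onnx* names. A minimal sketch (assuming fxn 0.0.54 is installed) of what that means for existing code:

    # Hypothetical usage sketch: both spellings resolve to the same class,
    # per the deprecated aliases defined at the bottom of fxn/beta/metadata.py below.
    from fxn.beta import OnnxInferenceMetadata, ONNXInferenceMetadata

    assert ONNXInferenceMetadata is OnnxInferenceMetadata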

fxn-0.0.54/fxn/beta/cli/__init__.py
@@ -0,0 +1,7 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+
+from .llm import app as llm_app
+from .mcp import app as mcp_app

fxn-0.0.54/fxn/beta/cli/llm.py
@@ -0,0 +1,22 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+
+from pathlib import Path
+from typer import Argument, Option, Typer
+from typing_extensions import Annotated
+
+app = Typer(no_args_is_help=True)
+
+@app.command(name="chat", help="Start a chat session.")
+def chat (
+    model: Annotated[str, Argument(help="Model to chat with.")]
+):
+    pass
+
+@app.command(name="serve", help="Start an LLM server.")
+def serve (
+    port: Annotated[int, Option(help="Port to start the server on.")] = 11435
+):
+    pass

fxn-0.0.54/fxn/beta/cli/mcp.py
@@ -0,0 +1,16 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+
+from pathlib import Path
+from typer import Argument, Option, Typer
+from typing_extensions import Annotated
+
+app = Typer(no_args_is_help=True)
+
+@app.command(name="serve", help="Start an MCP server.")
+def serve (
+    port: Annotated[int, Option(help="Port to start the server on.")] = 11436
+):
+    pass

{fxn-0.0.52 → fxn-0.0.54}/fxn/beta/client.py
@@ -10,8 +10,7 @@ from typing import get_origin, Callable, Generator, Iterator, TypeVar
 from ..client import FunctionClient
 from ..services import PredictionService as EdgePredictionService
 from ..types import Acceleration
-from .prediction import PredictionService
-from .remote import RemoteAcceleration
+from .services import PredictionService, RemoteAcceleration

 F = TypeVar("F", bound=Callable[..., object])


fxn-0.0.54/fxn/beta/llm/__init__.py
@@ -0,0 +1,5 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+

fxn-0.0.54/fxn/beta/llm/server.py
@@ -0,0 +1,5 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+

fxn-0.0.54/fxn/beta/metadata.py
@@ -0,0 +1,189 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+
+from pathlib import Path
+from pydantic import BaseModel, BeforeValidator, ConfigDict, Field
+from typing import Annotated, Literal
+
+def _validate_torch_module (module: "torch.nn.Module") -> "torch.nn.Module": # type: ignore
+    try:
+        from torch.nn import Module
+        if not isinstance(module, Module):
+            raise ValueError(f"Expected `torch.nn.Module` model but got `{type(module).__qualname__}`")
+        return module
+    except ImportError:
+        raise ImportError("PyTorch is required to create this metadata but it is not installed.")
+
+def _validate_ort_inference_session (session: "onnxruntime.InferenceSession") -> "onnxruntime.InferenceSession": # type: ignore
+    try:
+        from onnxruntime import InferenceSession
+        if not isinstance(session, InferenceSession):
+            raise ValueError(f"Expected `onnxruntime.InferenceSession` model but got `{type(session).__qualname__}`")
+        return session
+    except ImportError:
+        raise ImportError("ONNXRuntime is required to create this metadata but it is not installed.")
+
+def _validate_torch_tensor_args (args: list) -> list:
+    try:
+        from torch import Tensor
+        for idx, arg in enumerate(args):
+            if not isinstance(arg, Tensor):
+                raise ValueError(f"Expected `torch.Tensor` instance at `model_args[{idx}]` but got `{type(arg).__qualname__}`")
+        return args
+    except ImportError:
+        raise ImportError("PyTorch is required to create this metadata but it is not installed.")
+
+def _validate_llama_cpp_model (model: "llama_cpp.llama.Llama") -> "llama_cpp.llama.Llama": # type: ignore
+    try:
+        from llama_cpp import Llama
+        if not isinstance(model, Llama):
+            raise ValueError(f"Expected `llama_cpp.llama.Llama` model but got `{type(model).__qualname__}`")
+        return model
+    except ImportError:
+        raise ImportError("Llama-cpp-python is required to create this metadata but it is not installed.")
+
+class CoreMLInferenceMetadata (BaseModel):
+    """
+    Metadata required to lower a PyTorch model for inference on iOS, macOS, and visionOS with CoreML.
+
+    Members:
+        model (torch.nn.Module): PyTorch module to apply metadata to.
+        model_args (tuple[Tensor,...]): Positional inputs to the model.
+    """
+    kind: Literal["meta.inference.coreml"] = "meta.inference.coreml"
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+        description="PyTorch module to apply metadata to.",
+        exclude=True
+    )
+    model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
+        description="Positional inputs to the model.",
+        exclude=True
+    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
+
+class OnnxInferenceMetadata (BaseModel):
+    """
+    Metadata required to lower a PyTorch model for inference.
+
+    Members:
+        model (torch.nn.Module): PyTorch module to apply metadata to.
+        model_args (tuple[Tensor,...]): Positional inputs to the model.
+    """
+    kind: Literal["meta.inference.onnx"] = "meta.inference.onnx"
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+        description="PyTorch module to apply metadata to.",
+        exclude=True
+    )
+    model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
+        description="Positional inputs to the model.",
+        exclude=True
+    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
+
+class OnnxRuntimeInferenceSessionMetadata (BaseModel):
+    """
+    Metadata required to lower an ONNXRuntime `InferenceSession` for inference.
+
+    Members:
+        session (onnxruntime.InferenceSession): ONNXRuntime inference session to apply metadata to.
+        model_path (str | Path): ONNX model path. The model must exist at this path in the compiler sandbox.
+    """
+    kind: Literal["meta.inference.onnxruntime"] = "meta.inference.onnxruntime"
+    session: Annotated[object, BeforeValidator(_validate_ort_inference_session)] = Field(
+        description="ONNXRuntime inference session to apply metadata to.",
+        exclude=True
+    )
+    model_path: str | Path = Field(
+        description="ONNX model path. The model must exist at this path in the compiler sandbox.",
+        exclude=True
+    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
+
+class LiteRTInferenceMetadata (BaseModel):
+    """
+    Metadata required to lower PyTorch model for inference with LiteRT (fka TensorFlow Lite).
+
+    Members:
+        model (torch.nn.Module): PyTorch module to apply metadata to.
+        model_args (tuple[Tensor,...]): Positional inputs to the model.
+    """
+    kind: Literal["meta.inference.litert"] = "meta.inference.litert"
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+        description="PyTorch module to apply metadata to.",
+        exclude=True
+    )
+    model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
+        description="Positional inputs to the model.",
+        exclude=True
+    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
+
+class OpenVINOInferenceMetadata (BaseModel):
+    """
+    Metadata required to lower PyTorch model for interence with Intel OpenVINO.
+
+    Members:
+        model (torch.nn.Module): PyTorch module to apply metadata to.
+        model_args (tuple[Tensor,...]): Positional inputs to the model.
+    """
+    kind: Literal["meta.inference.openvino"] = "meta.inference.openvino"
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+        description="PyTorch module to apply metadata to.",
+        exclude=True
+    )
+    model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
+        description="Positional inputs to the model.",
+        exclude=True
+    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
+
+QnnInferenceBackend = Literal["cpu", "gpu", "htp"]
+QnnInferenceQuantization = Literal["w8a8", "w8a16", "w4a8", "w4a16"]
+
+class QnnInferenceMetadata (BaseModel):
+    """
+    Metadata required to lower a PyTorch model for inference on Qualcomm accelerators with QNN SDK.
+
+    Members:
+        model (torch.nn.Module): PyTorch module to apply metadata to.
+        model_args (tuple[Tensor,...]): Positional inputs to the model.
+        backend (QnnInferenceBackend): QNN inference backend. Defaults to `cpu`.
+        quantization (QnnInferenceQuantization): QNN model quantization mode. This MUST only be specified when backend is `htp`.
+    """
+    kind: Literal["meta.inference.qnn"] = "meta.inference.qnn"
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+        description="PyTorch module to apply metadata to.",
+        exclude=True
+    )
+    model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
+        description="Positional inputs to the model.",
+        exclude=True
+    )
+    backend: QnnInferenceBackend = Field(
+        default="cpu",
+        description="QNN backend to execute the model.",
+        exclude=True
+    )
+    quantization: QnnInferenceQuantization | None = Field(
+        default=None,
+        description="QNN model quantization mode. This MUST only be specified when backend is `htp`.",
+        exclude=True
+    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
+
+class LlamaCppInferenceMetadata (BaseModel):
+    """
+    Metadata required to lower a Llama.cpp model for LLM inference.
+    """
+    kind: Literal["meta.inference.llama_cpp"] = "meta.inference.llama_cpp"
+    model: Annotated[object, BeforeValidator(_validate_llama_cpp_model)] = Field(
+        description="Llama model that metadata applies to.",
+        exclude=True
+    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
+
+# DEPRECATED
+ONNXInferenceMetadata = OnnxInferenceMetadata
+ONNXRuntimeInferenceSessionMetadata = OnnxRuntimeInferenceSessionMetadata
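
Note: a hedged construction sketch for the new metadata models; only the field names (model, model_args, backend, quantization) and their allowed values come from the file above. The PyTorch module itself is illustrative and assumes torch is installed:

    # Hypothetical sketch: QNN inference metadata for a small PyTorch module.
    import torch
    from fxn.beta import QnnInferenceMetadata

    module = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU())
    metadata = QnnInferenceMetadata(
        model=module,
        model_args=[torch.randn(1, 3, 224, 224)],  # positional inputs, validated as torch.Tensor
        backend="htp",                             # one of "cpu" | "gpu" | "htp"
        quantization="w8a16"                       # only meaningful when backend is "htp"
    )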

fxn-0.0.54/fxn/beta/services/__init__.py
@@ -0,0 +1,7 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+
+from .prediction import PredictionService
+from .remote import RemoteAcceleration

{fxn-0.0.52/fxn/beta → fxn-0.0.54/fxn/beta/services}/prediction.py
@@ -3,7 +3,7 @@
 # Copyright © 2025 NatML Inc. All Rights Reserved.
 #

-from ..client import FunctionClient
+from ...client import FunctionClient
 from .remote import RemotePredictionService

 class PredictionService:

{fxn-0.0.52/fxn/beta → fxn-0.0.54/fxn/beta/services}/remote.py
@@ -15,10 +15,10 @@ from requests import get, put
 from typing import Literal
 from urllib.request import urlopen

-from ..c import Configuration
-from ..client import FunctionClient
-from ..services import Value
-from ..types import Dtype, Prediction
+from ...c import Configuration
+from ...client import FunctionClient
+from ...services.prediction import Value
+from ...types import Dtype, Prediction

 RemoteAcceleration = Literal["auto", "cpu", "a40", "a100"]


fxn-0.0.54/fxn/cli/__init__.py
@@ -0,0 +1,91 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+
+import typer
+
+from ..logging import TracebackMarkupConsole
+from ..version import __version__
+
+from .auth import app as auth_app
+from .compile import compile_predictor
+from .misc import cli_options
+from .predictions import create_prediction
+from .predictors import archive_predictor, delete_predictor, retrieve_predictor
+from .sources import retrieve_source
+from ..beta.cli import llm_app, mcp_app
+
+# Define CLI
+typer.main.console_stderr = TracebackMarkupConsole()
+app = typer.Typer(
+    name=f"Function CLI {__version__}",
+    no_args_is_help=True,
+    pretty_exceptions_show_locals=False,
+    pretty_exceptions_short=True,
+    add_completion=False
+)
+
+# Add top level options
+app.callback()(cli_options)
+
+# Predictions
+app.command(
+    name="predict",
+    help="Make a prediction.",
+    context_settings={ "allow_extra_args": True, "ignore_unknown_options": True },
+    rich_help_panel="Predictions"
+)(create_prediction)
+app.command(
+    name="source",
+    help="Retrieve the generated C++ code for a given prediction.",
+    rich_help_panel="Predictions"
+)(retrieve_source)
+
+# Predictors
+app.command(
+    name="compile",
+    help="Create a predictor by compiling a Python function.",
+    rich_help_panel="Predictors"
+)(compile_predictor)
+app.command(
+    name="retrieve",
+    help="Retrieve a predictor.",
+    rich_help_panel="Predictors"
+)(retrieve_predictor)
+app.command(
+    name="archive",
+    help="Archive a predictor.",
+    rich_help_panel="Predictors"
+)(archive_predictor)
+app.command(
+    name="delete",
+    help="Delete a predictor.",
+    rich_help_panel="Predictors"
+)(delete_predictor)
+
+# Subcommands
+app.add_typer(
+    auth_app,
+    name="auth",
+    help="Login, logout, and check your authentication status.",
+    rich_help_panel="Auth"
+)
+app.add_typer(
+    llm_app,
+    name="llm",
+    hidden=True,
+    help="Work with large language models (LLMs).",
+    rich_help_panel="Beta"
+)
+app.add_typer(
+    mcp_app,
+    name="mcp",
+    hidden=True,
+    help="Provide prediction functions as tools for use by AI assistants.",
+    rich_help_panel="Beta"
+)
+
+# Run
+if __name__ == "__main__":
+    app()

{fxn-0.0.52 → fxn-0.0.54}/fxn/cli/compile.py
@@ -86,6 +86,8 @@ def _load_predictor_func (path: str) -> Callable[...,object]:
     if "" not in sys.path:
         sys.path.insert(0, "")
     path: Path = Path(path).resolve()
+    if not path.exists():
+        raise ValueError(f"Cannot compile predictor because no Python module exists at the given path.")
     sys.path.insert(0, str(path.parent))
     name = getmodulename(path)
     spec = spec_from_file_location(name, path)

{fxn-0.0.52 → fxn-0.0.54}/fxn/cli/sources.py
@@ -15,8 +15,8 @@ from ..logging import CustomProgress, CustomProgressTask
 from .auth import get_access_key

 def retrieve_source (
-    predictor: Annotated[str, Option(help="Predictor tag.")] = None,
     prediction: Annotated[str, Option(help="Prediction identifier. If specified, this MUST be from a prediction returned by the Function API.")] = None,
+    predictor: Annotated[str, Option(help="Predictor tag. If specified, a prediction will be made with this predictor before retrieving the source.")] = None,
     output: Annotated[Path, Option(help="Path to output source file.")] = Path("predictor.cpp")
 ):
     if not ((predictor is not None) ^ (prediction is not None)):

{fxn-0.0.52 → fxn-0.0.54}/fxn/compile.py
@@ -13,11 +13,11 @@ from typing import Any, Callable, Literal, ParamSpec, TypeVar, cast

 from .beta import (
     CoreMLInferenceMetadata, LiteRTInferenceMetadata, LlamaCppInferenceMetadata,
-    ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
+    OnnxInferenceMetadata, OnnxRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
     QnnInferenceMetadata
 )
 from .sandbox import Sandbox
-from .types import AccessMode
+from .types import PredictorAccess

 CompileTarget = Literal[
     "android",
@@ -33,8 +33,8 @@ CompileMetadata = (
     CoreMLInferenceMetadata |
     LiteRTInferenceMetadata |
     LlamaCppInferenceMetadata |
-    ONNXInferenceMetadata |
-    ONNXRuntimeInferenceSessionMetadata |
+    OnnxInferenceMetadata |
+    OnnxRuntimeInferenceSessionMetadata |
     OpenVINOInferenceMetadata |
     QnnInferenceMetadata
 )
@@ -51,7 +51,7 @@ class PredictorSpec (BaseModel):
     sandbox: Sandbox = Field(description="Sandbox to compile the function.")
     targets: list[str] | None = Field(description="Targets to compile this predictor for. Pass `None` to compile for our default targets.")
     metadata: list[object] = Field(default=[], description="Metadata to use while compiling the function.")
-    access: AccessMode = Field(description="Predictor access.")
+    access: PredictorAccess = Field(description="Predictor access.")
     card: str | None = Field(default=None, description="Predictor card (markdown).")
     media: str | None = Field(default=None, description="Predictor media URL.")
     license: str | None = Field(default=None, description="Predictor license URL. This is required for public predictors.")
@@ -65,7 +65,7 @@ def compile (
     trace_modules: list[ModuleType]=[],
     targets: list[CompileTarget]=None,
     metadata: list[CompileMetadata]=[],
-    access: AccessMode=AccessMode.Private,
+    access: PredictorAccess="private",
     card: str | Path=None,
     media: Path=None,
     license: str=None,
@@ -81,7 +81,7 @@ def compile (
         trace_modules (list): Modules to trace and compile.
        targets (list): Targets to compile this predictor for. Pass `None` to compile for our default targets.
         metadata (list): Metadata to use while compiling the function.
-        access (AccessMode): Predictor access.
+        access (PredictorAccess): Predictor access.
         card (str | Path): Predictor card markdown string or path to card.
         media (Path): Predictor thumbnail image (jpeg or png) path.
         license (str): Predictor license URL. This is required for public predictors.
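
Note: a hedged sketch of the decorator after the access change; the tag, description, and sandbox arguments are assumed from the surrounding compiler API rather than this hunk, which only renames the access type and switches its default to the "private" literal:

    # Hypothetical sketch: access is now a plain string literal instead of AccessMode.Private.
    from fxn.compile import compile
    from fxn.sandbox import Sandbox

    @compile(
        tag="@yusuf/add",                        # hypothetical predictor tag
        description="Add two numbers.",
        sandbox=Sandbox().pip_install("numpy"),
        access="private"                         # was: access=AccessMode.Private
    )
    def add (a: float, b: float) -> float:
        return a + b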

{fxn-0.0.52 → fxn-0.0.54}/fxn/sandbox.py
@@ -57,6 +57,8 @@ class EntrypointCommand (UploadableCommand):
 class PipInstallCommand (BaseModel):
     kind: Literal["pip_install"] = "pip_install"
     packages: list[str]
+    index_url: str | None
+    flags: str

 class AptInstallCommand (BaseModel):
     kind: Literal["apt_install"] = "apt_install"
@@ -133,14 +135,25 @@ class Sandbox (BaseModel):
         )
         return Sandbox(commands=self.commands + [command])

-    def pip_install (self, *packages: str) -> Sandbox:
+    def pip_install (
+        self,
+        *packages: str,
+        index_url: str=None,
+        flags: str=""
+    ) -> Sandbox:
         """
         Install Python packages in the sandbox.

         Parameters:
             packages (list): Packages to install.
+            index_url (str | None): Index URL to search for package.
+            flags (str): Additional flags to pass to `pip`.
         """
-        command = PipInstallCommand(packages=packages)
+        command = PipInstallCommand(
+            packages=packages,
+            index_url=index_url,
+            flags=flags
+        )
         return Sandbox(commands=self.commands + [command])

     def apt_install (self, *packages: str) -> Sandbox:
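
Note: a hedged sketch of the extended pip_install; the index URL and flags below are illustrative, and the only claims taken from this hunk are the new index_url and flags keyword arguments (apt_install appears unchanged in the surrounding context):

    # Hypothetical sketch: install packages from a custom index with extra pip flags.
    from fxn.sandbox import Sandbox

    sandbox = (
        Sandbox()
        .pip_install(
            "torch", "torchvision",
            index_url="https://download.pytorch.org/whl/cpu",  # illustrative index URL
            flags="--no-cache-dir"                             # passed through to pip
        )
        .apt_install("libgomp1")
    )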

{fxn-0.0.52 → fxn-0.0.54}/fxn/services/prediction.py
@@ -43,18 +43,6 @@ class PredictionService:
         self.__cache_dir = self.__class__.__get_home_dir() / ".fxn" / "cache"
         self.__cache_dir.mkdir(parents=True, exist_ok=True)

-    def ready (self, tag: str, **kwargs) -> bool:
-        """
-        Check whether a predictor has been preloaded and is ready to make predictions.
-
-        Parameters:
-            tag (str): Predictor tag.
-
-        Returns:
-            bool: Whether the predictor is ready to make predictions.
-        """
-        return tag in self.__cache
-
     def create (
         self,
         tag: str,

{fxn-0.0.52 → fxn-0.0.54}/fxn/types/__init__.py
@@ -5,5 +5,5 @@

 from .dtype import Dtype
 from .prediction import Acceleration, Prediction, PredictionResource
-from .predictor import AccessMode, EnumerationMember, Parameter, Predictor, PredictorStatus, Signature
+from .predictor import EnumerationMember, Parameter, Predictor, PredictorAccess, PredictorStatus, Signature
 from .user import User

{fxn-0.0.52 → fxn-0.0.54}/fxn/types/predictor.py
@@ -3,28 +3,15 @@
 # Copyright © 2025 NatML Inc. All Rights Reserved.
 #

-from enum import Enum
 from pydantic import AliasChoices, BaseModel, ConfigDict, Field
-from typing import Any
+from typing import Any, Literal

 from .dtype import Dtype
 from .user import User

-class AccessMode (str, Enum):
-    """
-    Predictor access mode.
-    """
-    Public = "PUBLIC"
-    Private = "PRIVATE"
+PredictorAccess = Literal["public", "private", "unlisted"]

-class PredictorStatus (str, Enum):
-    """
-    Predictor status.
-    """
-    Compiling = "COMPILING"
-    Active = "ACTIVE"
-    Invalid = "INVALID"
-    Archived = "ARCHIVED"
+PredictorStatus = Literal["compiling", "active", "archived"]

 class EnumerationMember (BaseModel):
     """
@@ -79,7 +66,7 @@ class Predictor (BaseModel):
         owner (User): Predictor owner.
         name (str): Predictor name.
         status (PredictorStatus): Predictor status.
-        access (AccessMode): Predictor access.
+        access (PredictorAccess): Predictor access.
         signature (Signature): Predictor signature.
         created (str): Date created.
         description (str): Predictor description.
@@ -91,7 +78,7 @@ class Predictor (BaseModel):
     owner: User = Field(description="Predictor owner.")
     name: str = Field(description="Predictor name.")
     status: PredictorStatus = Field(description="Predictor status.")
-    access: AccessMode = Field(description="Predictor access.")
+    access: PredictorAccess = Field(description="Predictor access.")
     signature: Signature = Field(description="Predictor signature.")
     created: str = Field(description="Date created.")
     description: str | None = Field(default=None, description="Predictor description.")
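
Note: a hedged migration sketch for the enum-to-literal change above; the retrieve call is assumed from the predictor service API and the tag is hypothetical, while the string values themselves come from this file:

    # Hypothetical sketch: status and access are now plain strings, not enum members.
    from fxn import Function

    fxn = Function()
    predictor = fxn.predictors.retrieve(tag="@fxn/greeting")  # hypothetical tag
    if predictor and predictor.status == "active":            # was: PredictorStatus.Active
        print(predictor.access)                               # "public" | "private" | "unlisted"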

{fxn-0.0.52 → fxn-0.0.54}/fxn/version.py
@@ -3,4 +3,4 @@
 # Copyright © 2025 NatML Inc. All Rights Reserved.
 #

-__version__ = "0.0.52"
+__version__ = "0.0.54"

{fxn-0.0.52 → fxn-0.0.54}/fxn.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fxn
-Version: 0.0.52
+Version: 0.0.54
 Summary: Run prediction functions locally in Python. Register at https://fxn.ai.
 Author-email: "NatML Inc." <hi@fxn.ai>
 License: Apache License

{fxn-0.0.52 → fxn-0.0.54}/fxn.egg-info/SOURCES.txt
@@ -17,8 +17,14 @@ fxn.egg-info/top_level.txt
 fxn/beta/__init__.py
 fxn/beta/client.py
 fxn/beta/metadata.py
-fxn/beta/prediction.py
-fxn/beta/remote.py
+fxn/beta/cli/__init__.py
+fxn/beta/cli/llm.py
+fxn/beta/cli/mcp.py
+fxn/beta/llm/__init__.py
+fxn/beta/llm/server.py
+fxn/beta/services/__init__.py
+fxn/beta/services/prediction.py
+fxn/beta/services/remote.py
 fxn/c/__init__.py
 fxn/c/configuration.py
 fxn/c/fxnc.py

fxn-0.0.52/fxn/beta/__init__.py
@@ -1,11 +0,0 @@
-#
-# Function
-# Copyright © 2025 NatML Inc. All Rights Reserved.
-#
-
-from .metadata import (
-    CoreMLInferenceMetadata, LiteRTInferenceMetadata, LlamaCppInferenceMetadata,
-    ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
-    QnnInferenceMetadata
-)
-from .remote import RemoteAcceleration

fxn-0.0.52/fxn/beta/metadata.py
@@ -1,89 +0,0 @@
-#
-# Function
-# Copyright © 2025 NatML Inc. All Rights Reserved.
-#
-
-from pathlib import Path
-from pydantic import BaseModel, BeforeValidator, ConfigDict, Field
-from typing import Annotated, Literal
-
-def _validate_torch_module (module: "torch.nn.Module") -> "torch.nn.Module": # type: ignore
-    try:
-        from torch.nn import Module # type: ignore
-        if not isinstance(module, Module):
-            raise ValueError(f"Expected torch.nn.Module, got {type(module)}")
-        return module
-    except ImportError:
-        raise ImportError("PyTorch is required to create this metadata but is not installed.")
-
-def _validate_ort_inference_session (session: "onnxruntime.InferenceSession") -> "onnxruntime.InferenceSession": # type: ignore
-    try:
-        from onnxruntime import InferenceSession # type: ignore
-        if not isinstance(session, InferenceSession):
-            raise ValueError(f"Expected onnxruntime.InferenceSession, got {type(session)}")
-        return session
-    except ImportError:
-        raise ImportError("ONNXRuntime is required to create this metadata but is not installed.")
-
-class CoreMLInferenceMetadata (BaseModel):
-    """
-    Metadata required to lower a PyTorch model for inference on iOS, macOS, and visionOS with CoreML.
-    """
-    kind: Literal["meta.inference.coreml"] = "meta.inference.coreml"
-    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
-    model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
-    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
-
-class ONNXInferenceMetadata (BaseModel):
-    """
-    Metadata required to lower a PyTorch model for inference.
-    """
-    kind: Literal["meta.inference.onnx"] = "meta.inference.onnx"
-    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
-    model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
-    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
-
-class ONNXRuntimeInferenceSessionMetadata (BaseModel):
-    """
-    Metadata required to lower an ONNXRuntime `InferenceSession` for inference.
-    """
-    kind: Literal["meta.inference.onnxruntime"] = "meta.inference.onnxruntime"
-    session: Annotated[object, BeforeValidator(_validate_ort_inference_session)] = Field(description="ONNXRuntime inference session to apply metadata to.", exclude=True)
-    model_path: Path = Field(description="ONNX model path. The model must exist at this path in the compiler sandbox.", exclude=True)
-    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
-
-class LiteRTInferenceMetadata (BaseModel):
-    """
-    Metadata required to lower PyTorch model for inference with LiteRT (fka TensorFlow Lite).
-    """
-    kind: Literal["meta.inference.litert"] = "meta.inference.litert"
-    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
-    model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
-    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
-
-class OpenVINOInferenceMetadata (BaseModel):
-    """
-    Metadata required to lower PyTorch model for interence with Intel OpenVINO.
-    """
-    kind: Literal["meta.inference.openvino"] = "meta.inference.openvino"
-    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
-    model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
-    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
-
-class QnnInferenceMetadata (BaseModel):
-    """
-    Metadata required to lower a PyTorch model for inference on Qualcomm accelerators with QNN SDK.
-    """
-    kind: Literal["meta.inference.qnn"] = "meta.inference.qnn"
-    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
-    model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
-    backend: Literal["cpu", "gpu"] = Field(default="cpu", description="QNN backend to execute the model.", exclude=True) # CHECK # Add `htp`
-    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
-
-class LlamaCppInferenceMetadata (BaseModel): # INCOMPLETE
-    """
-    Metadata required to lower a GGUF model for LLM inference.
-    """
-    kind: Literal["meta.inference.gguf"] = "meta.inference.gguf"
-    model_path: Path = Field(description="GGUF model path. The model must exist at this path in the compiler sandbox.", exclude=True)
-    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)

fxn-0.0.52/fxn/cli/__init__.py
@@ -1,51 +0,0 @@
-#
-# Function
-# Copyright © 2025 NatML Inc. All Rights Reserved.
-#
-
-import typer
-
-from ..logging import TracebackMarkupConsole
-from ..version import __version__
-
-from .auth import app as auth_app
-from .compile import compile_predictor
-from .misc import cli_options
-from .predictions import create_prediction
-from .predictors import archive_predictor, delete_predictor, retrieve_predictor
-from .sources import retrieve_source
-
-# Define CLI
-typer.main.console_stderr = TracebackMarkupConsole()
-app = typer.Typer(
-    name=f"Function CLI {__version__}",
-    no_args_is_help=True,
-    pretty_exceptions_show_locals=False,
-    pretty_exceptions_short=True,
-    add_completion=False
-)
-
-# Add top level options
-app.callback()(cli_options)
-
-# Add subcommands
-app.add_typer(auth_app, name="auth", help="Login, logout, and check your authentication status.")
-
-# Add top-level commands
-app.command(
-    name="predict",
-    help="Make a prediction.",
-    context_settings={ "allow_extra_args": True, "ignore_unknown_options": True }
-)(create_prediction)
-app.command(
-    name="compile",
-    help="Create a predictor by compiling a Python function."
-)(compile_predictor)
-app.command(name="retrieve", help="Retrieve a predictor.")(retrieve_predictor)
-app.command(name="archive", help="Archive a predictor.")(archive_predictor)
-app.command(name="delete", help="Delete a predictor.")(delete_predictor)
-app.command(name="source", help="Retrieve the native source code for a given prediction.")(retrieve_source)
-
-# Run
-if __name__ == "__main__":
-    app()