fxn-0.0.53-py3-none-any.whl → fxn-0.0.55-py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
fxn/beta/__init__.py CHANGED
@@ -6,8 +6,9 @@
  from .metadata import (
      CoreMLInferenceMetadata, LiteRTInferenceMetadata, LlamaCppInferenceMetadata,
      OnnxInferenceMetadata, OnnxRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
-     QnnInferenceMetadata, QnnInferenceBackend, QnnInferenceQuantization,
-     # Deprecated
-     ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata
+     QnnInferenceMetadata, QnnInferenceBackend, QnnInferenceQuantization, TensorRTInferenceMetadata
  )
- from .services import RemoteAcceleration
+ from .services import RemoteAcceleration
+
+ # DEPRECATED
+ from .metadata import ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata
fxn/beta/cli/__init__.py CHANGED
@@ -3,4 +3,5 @@
  # Copyright © 2025 NatML Inc. All Rights Reserved.
  #

- from .llm import app as llm_app
+ from .llm import app as llm_app
+ from .mcp import app as mcp_app
fxn/beta/cli/mcp.py ADDED
@@ -0,0 +1,16 @@
+ #
+ # Function
+ # Copyright © 2025 NatML Inc. All Rights Reserved.
+ #
+
+ from pathlib import Path
+ from typer import Argument, Option, Typer
+ from typing_extensions import Annotated
+
+ app = Typer(no_args_is_help=True)
+
+ @app.command(name="serve", help="Start an MCP server.")
+ def serve (
+     port: Annotated[int, Option(help="Port to start the server on.")] = 11436
+ ):
+     pass
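
Note: `serve` is currently a stub (its body is `pass`), and the `Path` and `Argument` imports are not yet used. Given the CLI wiring below, which registers this Typer app as a hidden `mcp` subcommand, the command would presumably be invoked as `fxn mcp serve --port 11436` once the server logic lands.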
fxn/beta/metadata.py CHANGED
@@ -3,28 +3,46 @@
  # Copyright © 2025 NatML Inc. All Rights Reserved.
  #

- from os import PathLike
  from pathlib import Path
  from pydantic import BaseModel, BeforeValidator, ConfigDict, Field
  from typing import Annotated, Literal

  def _validate_torch_module (module: "torch.nn.Module") -> "torch.nn.Module": # type: ignore
      try:
-         from torch.nn import Module # type: ignore
+         from torch.nn import Module
          if not isinstance(module, Module):
-             raise ValueError(f"Expected torch.nn.Module, got {type(module)}")
+             raise ValueError(f"Expected `torch.nn.Module` model but got `{type(module).__qualname__}`")
          return module
      except ImportError:
-         raise ImportError("PyTorch is required to create this metadata but is not installed.")
+         raise ImportError("PyTorch is required to create this metadata but it is not installed.")

  def _validate_ort_inference_session (session: "onnxruntime.InferenceSession") -> "onnxruntime.InferenceSession": # type: ignore
      try:
-         from onnxruntime import InferenceSession # type: ignore
+         from onnxruntime import InferenceSession
          if not isinstance(session, InferenceSession):
-             raise ValueError(f"Expected onnxruntime.InferenceSession, got {type(session)}")
+             raise ValueError(f"Expected `onnxruntime.InferenceSession` model but got `{type(session).__qualname__}`")
          return session
      except ImportError:
-         raise ImportError("ONNXRuntime is required to create this metadata but is not installed.")
+         raise ImportError("ONNXRuntime is required to create this metadata but it is not installed.")
+
+ def _validate_torch_tensor_args (args: list) -> list:
+     try:
+         from torch import Tensor
+         for idx, arg in enumerate(args):
+             if not isinstance(arg, Tensor):
+                 raise ValueError(f"Expected `torch.Tensor` instance at `model_args[{idx}]` but got `{type(arg).__qualname__}`")
+         return args
+     except ImportError:
+         raise ImportError("PyTorch is required to create this metadata but it is not installed.")
+
+ def _validate_llama_cpp_model (model: "llama_cpp.llama.Llama") -> "llama_cpp.llama.Llama": # type: ignore
+     try:
+         from llama_cpp import Llama
+         if not isinstance(model, Llama):
+             raise ValueError(f"Expected `llama_cpp.llama.Llama` model but got `{type(model).__qualname__}`")
+         return model
+     except ImportError:
+         raise ImportError("Llama-cpp-python is required to create this metadata but it is not installed.")

  class CoreMLInferenceMetadata (BaseModel):
      """
@@ -39,10 +57,15 @@ class CoreMLInferenceMetadata (BaseModel):
          description="PyTorch module to apply metadata to.",
          exclude=True
      )
-     model_args: list[object] = Field(
+     model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
          description="Positional inputs to the model.",
          exclude=True
      )
+     output_keys: list[str] | None = Field(
+         default=None,
+         description="Model output dictionary keys. Use this if the model returns a dictionary.",
+         exclude=True
+     )
      model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)

  class OnnxInferenceMetadata (BaseModel):
@@ -58,10 +81,15 @@ class OnnxInferenceMetadata (BaseModel):
          description="PyTorch module to apply metadata to.",
          exclude=True
      )
-     model_args: list[object] = Field(
+     model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
          description="Positional inputs to the model.",
          exclude=True
      )
+     output_keys: list[str] | None = Field(
+         default=None,
+         description="Model output dictionary keys. Use this if the model returns a dictionary.",
+         exclude=True
+     )
      model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)

  class OnnxRuntimeInferenceSessionMetadata (BaseModel):
@@ -96,10 +124,15 @@ class LiteRTInferenceMetadata (BaseModel):
          description="PyTorch module to apply metadata to.",
          exclude=True
      )
-     model_args: list[object] = Field(
+     model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
          description="Positional inputs to the model.",
          exclude=True
      )
+     output_keys: list[str] | None = Field(
+         default=None,
+         description="Model output dictionary keys. Use this if the model returns a dictionary.",
+         exclude=True
+     )
      model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)

  class OpenVINOInferenceMetadata (BaseModel):
@@ -115,13 +148,18 @@ class OpenVINOInferenceMetadata (BaseModel):
          description="PyTorch module to apply metadata to.",
          exclude=True
      )
-     model_args: list[object] = Field(
+     model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
          description="Positional inputs to the model.",
          exclude=True
      )
+     output_keys: list[str] | None = Field(
+         default=None,
+         description="Model output dictionary keys. Use this if the model returns a dictionary.",
+         exclude=True
+     )
      model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)

- QnnInferenceBackend = Literal["cpu", "gpu"] # `htp` coming soon
+ QnnInferenceBackend = Literal["cpu", "gpu", "htp"]
  QnnInferenceQuantization = Literal["w8a8", "w8a16", "w4a8", "w4a16"]

  class QnnInferenceMetadata (BaseModel):
@@ -139,10 +177,15 @@ class QnnInferenceMetadata (BaseModel):
          description="PyTorch module to apply metadata to.",
          exclude=True
      )
-     model_args: list[object] = Field(
+     model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
          description="Positional inputs to the model.",
          exclude=True
      )
+     output_keys: list[str] | None = Field(
+         default=None,
+         description="Model output dictionary keys. Use this if the model returns a dictionary.",
+         exclude=True
+     )
      backend: QnnInferenceBackend = Field(
          default="cpu",
          description="QNN backend to execute the model.",
@@ -155,13 +198,58 @@ class QnnInferenceMetadata (BaseModel):
      )
      model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)

- class LlamaCppInferenceMetadata (BaseModel): # INCOMPLETE
+ CudaArchitecture = Literal[
+     "sm_80", "sm_86", "sm_87", # Ampere (A100)
+     "sm_89", # Ada Lovelace (L40)
+     "sm_90", # Hopper (H100)
+     "sm_100", # Blackwell (B200)
+ ]
+
+ TensorRTPrecision = Literal["fp32", "fp16", "int8", "int4"]
+
+ class TensorRTInferenceMetadata (BaseModel):
+     """
+     Metadata required to lower a PyTorch model for inference on Nvidia GPUs with TensorRT.
+
+     Members:
+         model (torch.nn.Module): PyTorch module to apply metadata to.
+         model_args (tuple[Tensor,...]): Positional inputs to the model.
+         cuda_arch (CudaArchitecture): Target CUDA architecture for the TensorRT engine. Defaults to `sm_80` (Ampere).
+         precision (TensorRTPrecision): TensorRT engine inference precision. Defaults to `fp16`.
+     """
+     kind: Literal["meta.inference.tensorrt"] = "meta.inference.tensorrt"
+     model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+         description="PyTorch module to apply metadata to.",
+         exclude=True
+     )
+     model_args: Annotated[list[object], BeforeValidator(_validate_torch_tensor_args)] = Field(
+         description="Positional inputs to the model.",
+         exclude=True
+     )
+     output_keys: list[str] | None = Field(
+         default=None,
+         description="Model output dictionary keys. Use this if the model returns a dictionary.",
+         exclude=True
+     )
+     cuda_arch: CudaArchitecture = Field(
+         default="sm_80",
+         description="Target CUDA architecture for the TensorRT engine.",
+         exclude=True
+     )
+     precision: TensorRTPrecision = Field(
+         default="fp16",
+         description="TensorRT engine inference precision.",
+         exclude=True
+     )
+     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
+
+ class LlamaCppInferenceMetadata (BaseModel):
      """
-     Metadata required to lower a GGUF model for LLM inference.
+     Metadata required to lower a Llama.cpp model for LLM inference.
      """
-     kind: Literal["meta.inference.gguf"] = "meta.inference.gguf"
-     model_path: Path = Field(
-         description="GGUF model path. The model must exist at this path in the compiler sandbox.",
+     kind: Literal["meta.inference.llama_cpp"] = "meta.inference.llama_cpp"
+     model: Annotated[object, BeforeValidator(_validate_llama_cpp_model)] = Field(
+         description="Llama model that metadata applies to.",
          exclude=True
      )
      model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
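
For orientation, here is a minimal sketch of constructing the new TensorRT metadata (the module and tensors are illustrative stand-ins; PyTorch must be installed for the validators to pass):

    import torch
    from fxn.beta import TensorRTInferenceMetadata

    # Any torch.nn.Module satisfies _validate_torch_module; Linear is a stand-in.
    model = torch.nn.Linear(4, 2)

    metadata = TensorRTInferenceMetadata(
        model=model,
        model_args=[torch.randn(1, 4)],  # each element must be a torch.Tensor
        cuda_arch="sm_90",               # target Hopper instead of the sm_80 default
        precision="fp16"                 # the default, spelled out for clarity
    )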
fxn/cli/__init__.py CHANGED
@@ -9,12 +9,12 @@ from ..logging import TracebackMarkupConsole
  from ..version import __version__

  from .auth import app as auth_app
- from .compile import compile_predictor
+ from .compile import compile_predictor, triage_predictor
  from .misc import cli_options
  from .predictions import create_prediction
  from .predictors import archive_predictor, delete_predictor, retrieve_predictor
  from .sources import retrieve_source
- from ..beta.cli import llm_app
+ from ..beta.cli import llm_app, mcp_app

  # Define CLI
  typer.main.console_stderr = TracebackMarkupConsole()
@@ -29,24 +29,70 @@ app = typer.Typer(
  # Add top level options
  app.callback()(cli_options)

- # Add subcommands
- app.add_typer(auth_app, name="auth", help="Login, logout, and check your authentication status.")
- app.add_typer(llm_app, name="llm", hidden=True, help="Work with large language models (LLMs).")
-
- # Add top-level commands
+ # Predictions
  app.command(
      name="predict",
      help="Make a prediction.",
-     context_settings={ "allow_extra_args": True, "ignore_unknown_options": True }
+     context_settings={ "allow_extra_args": True, "ignore_unknown_options": True },
+     rich_help_panel="Predictions"
  )(create_prediction)
+ app.command(
+     name="source",
+     help="Retrieve the generated C++ code for a given prediction.",
+     rich_help_panel="Predictions"
+ )(retrieve_source)
+
+ # Predictors
  app.command(
      name="compile",
-     help="Create a predictor by compiling a Python function."
+     help="Create a predictor by compiling a Python function.",
+     rich_help_panel="Predictors"
  )(compile_predictor)
- app.command(name="retrieve", help="Retrieve a predictor.")(retrieve_predictor)
- app.command(name="archive", help="Archive a predictor.")(archive_predictor)
- app.command(name="delete", help="Delete a predictor.")(delete_predictor)
- app.command(name="source", help="Retrieve the generated native code for a given predictor.")(retrieve_source)
+ app.command(
+     name="retrieve",
+     help="Retrieve a predictor.",
+     rich_help_panel="Predictors"
+ )(retrieve_predictor)
+ app.command(
+     name="archive",
+     help="Archive a predictor.",
+     rich_help_panel="Predictors"
+ )(archive_predictor)
+ app.command(
+     name="delete",
+     help="Delete a predictor.",
+     rich_help_panel="Predictors"
+ )(delete_predictor)
+
+ # Subcommands
+ app.add_typer(
+     auth_app,
+     name="auth",
+     help="Login, logout, and check your authentication status.",
+     rich_help_panel="Auth"
+ )
+ app.add_typer(
+     llm_app,
+     name="llm",
+     hidden=True,
+     help="Work with large language models (LLMs).",
+     rich_help_panel="Beta"
+ )
+ app.add_typer(
+     mcp_app,
+     name="mcp",
+     hidden=True,
+     help="Provide prediction functions as tools for use by AI assistants.",
+     rich_help_panel="Beta"
+ )
+
+ # Insiders
+ app.command(
+     name="triage",
+     help="Triage a compile error.",
+     rich_help_panel="Insiders",
+     hidden=True
+ )(triage_predictor)

  # Run
  if __name__ == "__main__":
fxn/cli/auth.py CHANGED
@@ -12,7 +12,7 @@ from ..function import Function
  app = Typer(no_args_is_help=True)

  @app.command(name="login", help="Login to Function.")
- def login (
+ def login(
      access_key: str=Argument(..., help="Function access key.", envvar="FXN_ACCESS_KEY")
  ):
      fxn = Function(access_key=access_key)
@@ -22,18 +22,18 @@ def login (
      print_json(data=user)

  @app.command(name="status", help="Get current authentication status.")
- def auth_status ():
+ def auth_status():
      fxn = Function(get_access_key())
      user = fxn.users.retrieve()
      user = user.model_dump() if user else None
      print_json(data=user)

  @app.command(name="logout", help="Logout from Function.")
- def logout ():
+ def logout():
      _set_access_key(None)
      print("Successfully logged out of Function")

- def get_access_key () -> str:
+ def get_access_key() -> str:
      """
      Get the CLI access key.

@@ -46,7 +46,7 @@ def get_access_key () -> str:
      with open(credentials_path) as f:
          return f.read()

- def _set_access_key (key: str):
+ def _set_access_key(key: str):
      """
      Set the CLI access key.

fxn/cli/compile.py CHANGED
@@ -9,9 +9,11 @@ from inspect import getmembers, getmodulename, isfunction
  from pathlib import Path
  from pydantic import BaseModel
  from rich import print as print_rich
+ from rich.panel import Panel
  import sys
  from typer import Argument, Option
  from typing import Callable, Literal
+ from typing_extensions import Annotated
  from urllib.parse import urlparse, urlunparse

  from ..client import FunctionAPIError
@@ -21,16 +23,39 @@ from ..sandbox import EntrypointCommand
  from ..logging import CustomProgress, CustomProgressTask
  from .auth import get_access_key

- class CompileError (Exception):
-     pass
-
- def compile_predictor (
+ def compile_predictor(
      path: str=Argument(..., help="Predictor path."),
      overwrite: bool=Option(False, "--overwrite", help="Whether to delete any existing predictor with the same tag before compiling."),
  ):
      run_async(_compile_predictor_async(path, overwrite=overwrite))

- async def _compile_predictor_async (
+ def triage_predictor(
+     reference_code: Annotated[str, Argument(help="Predictor compilation reference code.")]
+ ):
+     fxn = Function(get_access_key())
+     error = fxn.client.request(
+         method="GET",
+         path=f"/predictors/triage?referenceCode={reference_code}",
+         response_type=_TriagedCompileError
+     )
+     user_panel = Panel(
+         error.user,
+         title="User Error",
+         title_align="left",
+         highlight=True,
+         border_style="bright_red"
+     )
+     internal_panel = Panel(
+         error.internal,
+         title="Internal Error",
+         title_align="left",
+         highlight=True,
+         border_style="gold1"
+     )
+     print_rich(user_panel)
+     print_rich(internal_panel)
+
+ async def _compile_predictor_async(
      path: str,
      *,
      overwrite: bool
@@ -78,11 +103,11 @@ async def _compile_predictor_async (
                  task_queue.push_log(event)
              elif isinstance(event, _ErrorEvent):
                  task_queue.push_error(event)
-                 raise CompileError(event.data.error)
+                 raise _CompileError(event.data.error)
      predictor_url = _compute_predictor_url(fxn.client.api_url, spec.tag)
      print_rich(f"\n[bold spring_green3]🎉 Predictor is now being compiled.[/bold spring_green3] Check it out at [link={predictor_url}]{predictor_url}[/link]")

- def _load_predictor_func (path: str) -> Callable[...,object]:
+ def _load_predictor_func(path: str) -> Callable[...,object]:
      if "" not in sys.path:
          sys.path.insert(0, "")
      path: Path = Path(path).resolve()
@@ -97,7 +122,7 @@ def _load_predictor_func (path: str) -> Callable[...,object]:
      main_func = next(func for _, func in getmembers(module, isfunction) if hasattr(func, "__predictor_spec"))
      return main_func

- def _compute_predictor_url (api_url: str, tag: str) -> str:
+ def _compute_predictor_url(api_url: str, tag: str) -> str:
      parsed_url = urlparse(api_url)
      hostname_parts = parsed_url.hostname.split(".")
      if hostname_parts[0] == "api":
@@ -107,32 +132,39 @@ def _compute_predictor_url (api_url: str, tag: str) -> str:
      predictor_url = urlunparse(parsed_url._replace(netloc=netloc, path=f"{tag}"))
      return predictor_url

- class _Predictor (BaseModel):
+ class _Predictor(BaseModel):
      tag: str

- class _LogData (BaseModel):
+ class _LogData(BaseModel):
      message: str
      level: int = 0
      status: Literal["success", "error"] = "success"
      update: bool = False

- class _LogEvent (BaseModel):
+ class _LogEvent(BaseModel):
      event: Literal["log"]
      data: _LogData

- class _ErrorData (BaseModel):
+ class _ErrorData(BaseModel):
      error: str

- class _ErrorEvent (BaseModel):
+ class _ErrorEvent(BaseModel):
      event: Literal["error"]
      data: _ErrorData

+ class _CompileError(Exception):
+     pass
+
+ class _TriagedCompileError(BaseModel):
+     user: str
+     internal: str
+
  class ProgressLogQueue:

-     def __init__ (self):
+     def __init__(self):
          self.queue: list[tuple[int, CustomProgressTask]] = []

-     def push_log (self, event: _LogEvent):
+     def push_log(self, event: _LogEvent):
          # Check for update
          if event.data.update and self.queue:
              current_level, current_task = self.queue[-1]
@@ -149,15 +181,15 @@ class ProgressLogQueue:
          task.__enter__()
          self.queue.append((event.data.level, task))

-     def push_error (self, error: _ErrorEvent):
+     def push_error(self, error: _ErrorEvent):
          while self.queue:
              _, current_task = self.queue.pop()
              current_task.__exit__(RuntimeError, None, None)

-     def __enter__ (self):
+     def __enter__(self):
          return self

-     def __exit__ (self, exc_type, exc_value, traceback):
+     def __exit__(self, exc_type, exc_value, traceback):
          while self.queue:
              _, current_task = self.queue.pop()
              current_task.__exit__(None, None, None)
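
In short, the new hidden `triage` command fetches a triaged compile error from the `/predictors/triage` endpoint by reference code, then renders the user-facing and internal messages as two Rich panels. Once surfaced, it would presumably be run as `fxn triage <reference-code>`, with the reference code taken from a failed compile.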
fxn/cli/misc.py CHANGED
@@ -9,22 +9,22 @@ from webbrowser import open as open_browser

  from ..version import __version__

- def _explore (value: bool):
+ def _explore(value: bool):
      if value:
          open_browser("https://fxn.ai/explore")
          raise Exit()

- def _learn (value: bool):
+ def _learn(value: bool):
      if value:
          open_browser("https://docs.fxn.ai")
          raise Exit()

- def _version (value: bool):
+ def _version(value: bool):
      if value:
          print(__version__)
          raise Exit()

- def cli_options (
+ def cli_options(
      explore: bool = Option(None, "--explore", callback=_explore, help="Explore predictors on Function."),
      learn: bool = Option(None, "--learn", callback=_learn, help="Learn about Function."),
      version: bool = Option(None, "--version", callback=_version, help="Get the Function CLI version.")
fxn/cli/predictions.py CHANGED
@@ -24,7 +24,7 @@ def create_prediction (
  ):
      run_async(_predict_async(tag, quiet=quiet, context=context))

- async def _predict_async (tag: str, quiet: bool, context: Context):
+ async def _predict_async(tag: str, quiet: bool, context: Context):
      # Preload
      with CustomProgress(transient=True, disable=quiet):
          fxn = Function(get_access_key())
@@ -42,16 +42,7 @@ async def _predict_async (tag: str, quiet: bool, context: Context):
          prediction = fxn.predictions.create(tag, inputs=inputs)
          _log_prediction(prediction)

- def _parse_value (value: str):
-     """
-     Parse a value from a CLI argument.
-
-     Parameters:
-         value (str): CLI input argument.
-
-     Returns:
-         bool | int | float | str | Path: Parsed value.
-     """
+ def _parse_value (value: str) -> float | int | bool | str | Image.Image | BytesIO:
      # Boolean
      if value == "true":
          return True
@@ -81,14 +72,14 @@ def _parse_value (value: str):
      # String
      return value

- def _log_prediction (prediction: Prediction):
+ def _log_prediction(prediction: Prediction):
      images = [value for value in prediction.results or [] if isinstance(value, Image.Image)]
      prediction.results = [_serialize_value(value) for value in prediction.results] if prediction.results is not None else None
      print_json(data=prediction.model_dump())
      for image in images:
          image.show()

- def _serialize_value (value):
+ def _serialize_value(value) -> str:
      if isinstance(value, ndarray):
          return array_repr(value)
      if isinstance(value, Image.Image):
fxn/cli/predictors.py CHANGED
@@ -10,7 +10,7 @@ from ..function import Function
  from ..logging import CustomProgress, CustomProgressTask
  from .auth import get_access_key

- def retrieve_predictor (
+ def retrieve_predictor(
      tag: str=Argument(..., help="Predictor tag.")
  ):
      with CustomProgress(transient=True):
@@ -20,7 +20,7 @@ def retrieve_predictor (
          predictor = predictor.model_dump() if predictor else None
          print_json(data=predictor)

- def archive_predictor (
+ def archive_predictor(
      tag: str=Argument(..., help="Predictor tag.")
  ):
      with CustomProgress():
@@ -34,7 +34,7 @@ def archive_predictor (
              path=f"/predictors/{tag}/archive"
          )

- def delete_predictor (
+ def delete_predictor(
      tag: str=Argument(..., help="Predictor tag.")
  ):
      with CustomProgress():
fxn/cli/sources.py CHANGED
@@ -14,9 +14,9 @@
  from ..function import Function
  from ..logging import CustomProgress, CustomProgressTask
  from .auth import get_access_key

- def retrieve_source (
-     predictor: Annotated[str, Option(help="Predictor tag.")] = None,
+ def retrieve_source(
      prediction: Annotated[str, Option(help="Prediction identifier. If specified, this MUST be from a prediction returned by the Function API.")] = None,
+     predictor: Annotated[str, Option(help="Predictor tag. If specified, a prediction will be made with this predictor before retrieving the source.")] = None,
      output: Annotated[Path, Option(help="Path to output source file.")] = Path("predictor.cpp")
  ):
      if not ((predictor is not None) ^ (prediction is not None)):
@@ -37,7 +37,7 @@ def retrieve_source (
          source.code = str(output.resolve())
          print_json(data=source.model_dump(mode="json", by_alias=True))

- class _PredictionSource (BaseModel):
+ class _PredictionSource(BaseModel):
      tag: str
      target: str
      code: str
fxn/compile.py CHANGED
@@ -14,10 +14,10 @@ from typing import Any, Callable, Literal, ParamSpec, TypeVar, cast
  from .beta import (
      CoreMLInferenceMetadata, LiteRTInferenceMetadata, LlamaCppInferenceMetadata,
      OnnxInferenceMetadata, OnnxRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
-     QnnInferenceMetadata
+     QnnInferenceMetadata, TensorRTInferenceMetadata
  )
  from .sandbox import Sandbox
- from .types import AccessMode
+ from .types import PredictorAccess

  CompileTarget = Literal[
      "android",
@@ -36,7 +36,8 @@ CompileMetadata = (
      OnnxInferenceMetadata |
      OnnxRuntimeInferenceSessionMetadata |
      OpenVINOInferenceMetadata |
-     QnnInferenceMetadata
+     QnnInferenceMetadata |
+     TensorRTInferenceMetadata
  )

  P = ParamSpec("P")
@@ -51,7 +52,7 @@ class PredictorSpec (BaseModel):
      sandbox: Sandbox = Field(description="Sandbox to compile the function.")
      targets: list[str] | None = Field(description="Targets to compile this predictor for. Pass `None` to compile for our default targets.")
      metadata: list[object] = Field(default=[], description="Metadata to use while compiling the function.")
-     access: AccessMode = Field(description="Predictor access.")
+     access: PredictorAccess = Field(description="Predictor access.")
      card: str | None = Field(default=None, description="Predictor card (markdown).")
      media: str | None = Field(default=None, description="Predictor media URL.")
      license: str | None = Field(default=None, description="Predictor license URL. This is required for public predictors.")
@@ -65,7 +66,7 @@ def compile (
      trace_modules: list[ModuleType]=[],
      targets: list[CompileTarget]=None,
      metadata: list[CompileMetadata]=[],
-     access: AccessMode=AccessMode.Private,
+     access: PredictorAccess="private",
      card: str | Path=None,
      media: Path=None,
      license: str=None,
@@ -81,7 +82,7 @@ def compile (
          trace_modules (list): Modules to trace and compile.
          targets (list): Targets to compile this predictor for. Pass `None` to compile for our default targets.
          metadata (list): Metadata to use while compiling the function.
-         access (AccessMode): Predictor access.
+         access (PredictorAccess): Predictor access.
          card (str | Path): Predictor card markdown string or path to card.
          media (Path): Predictor thumbnail image (jpeg or png) path.
          license (str): Predictor license URL. This is required for public predictors.
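
With `AccessMode` gone, callers pass a plain string literal for `access`. A hedged sketch (the tag, description, and function body are illustrative, and this assumes `compile` is re-exported from the package root as in prior releases):

    from fxn import compile

    @compile(
        tag="@yourname/double",          # hypothetical predictor tag
        description="Double a number.",
        access="private"                 # was `access=AccessMode.Private` in 0.0.53
    )
    def double(x: float) -> float:
        return 2 * x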
fxn/lib/linux/arm64/libFunction.so CHANGED (binary)
fxn/lib/linux/x86_64/libFunction.so CHANGED (binary)
fxn/lib/macos/arm64/Function.dylib CHANGED (binary)
fxn/lib/macos/x86_64/Function.dylib CHANGED (binary)
fxn/lib/windows/arm64/Function.dll CHANGED (binary)
fxn/lib/windows/x86_64/Function.dll CHANGED (binary)
fxn/sandbox.py CHANGED
@@ -57,6 +57,8 @@ class EntrypointCommand (UploadableCommand):
  class PipInstallCommand (BaseModel):
      kind: Literal["pip_install"] = "pip_install"
      packages: list[str]
+     index_url: str | None
+     flags: str

  class AptInstallCommand (BaseModel):
      kind: Literal["apt_install"] = "apt_install"
@@ -133,14 +135,25 @@ class Sandbox (BaseModel):
          )
          return Sandbox(commands=self.commands + [command])

-     def pip_install (self, *packages: str) -> Sandbox:
+     def pip_install (
+         self,
+         *packages: str,
+         index_url: str=None,
+         flags: str=""
+     ) -> Sandbox:
          """
          Install Python packages in the sandbox.

          Parameters:
              packages (list): Packages to install.
+             index_url (str | None): Index URL to search for package.
+             flags (str): Additional flags to pass to `pip`.
          """
-         command = PipInstallCommand(packages=packages)
+         command = PipInstallCommand(
+             packages=packages,
+             index_url=index_url,
+             flags=flags
+         )
          return Sandbox(commands=self.commands + [command])

      def apt_install (self, *packages: str) -> Sandbox:
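
A quick sketch of the extended `pip_install` (the package names, index URL, and flags are illustrative):

    from fxn.sandbox import Sandbox

    # The new keyword arguments thread through to the PipInstallCommand
    # recorded in the sandbox specification.
    sandbox = Sandbox().pip_install(
        "torch", "torchvision",
        index_url="https://download.pytorch.org/whl/cu121",  # hypothetical custom index
        flags="--no-cache-dir"                               # extra flags passed to pip
    )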
fxn/services/prediction.py CHANGED
@@ -117,6 +117,21 @@ class PredictionService:
          with prediction:
              yield self.__to_prediction(tag, prediction)

+     def delete (self, tag: str) -> bool:
+         """
+         Delete a predictor that is loaded in memory.
+
+         Parameters:
+             tag (str): Predictor tag.
+
+         Returns:
+             bool: Whether the predictor was successfully deleted from memory.
+         """
+         if tag not in self.__cache:
+             return False
+         with self.__cache.pop(tag):
+             return True
+
      def __create_raw_prediction (
          self,
          tag: str,
@@ -245,7 +260,7 @@ class PredictionService:
                      tmp_file.write(chunk)
                      completed += len(chunk)
                      task.update(total=size, completed=completed)
-         Path(tmp_file.name).replace(path)
+             Path(tmp_file.name).replace(path)
          return path

      def __get_resource_path (self, resource: PredictionResource) -> Path:
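
The new `PredictionService.delete` evicts a loaded predictor from the in-memory cache without touching the server-side predictor. A hedged usage sketch (the tag is hypothetical; assumes an access key is configured):

    from fxn import Function

    fxn = Function()  # assumes FXN_ACCESS_KEY is set in the environment

    fxn.predictions.create("@yourname/double", inputs={ "x": 2.0 })  # loads the predictor into memory
    fxn.predictions.delete("@yourname/double")  # True: predictor was cached and is now released
    fxn.predictions.delete("@yourname/double")  # False: it is no longer in memory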
fxn/types/__init__.py CHANGED
@@ -5,5 +5,5 @@

  from .dtype import Dtype
  from .prediction import Acceleration, Prediction, PredictionResource
- from .predictor import AccessMode, EnumerationMember, Parameter, Predictor, PredictorStatus, Signature
+ from .predictor import EnumerationMember, Parameter, Predictor, PredictorAccess, PredictorStatus, Signature
  from .user import User
fxn/types/predictor.py CHANGED
@@ -3,28 +3,15 @@
  # Copyright © 2025 NatML Inc. All Rights Reserved.
  #

- from enum import Enum
  from pydantic import AliasChoices, BaseModel, ConfigDict, Field
- from typing import Any
+ from typing import Any, Literal

  from .dtype import Dtype
  from .user import User

- class AccessMode (str, Enum):
-     """
-     Predictor access mode.
-     """
-     Public = "PUBLIC"
-     Private = "PRIVATE"
+ PredictorAccess = Literal["public", "private", "unlisted"]

- class PredictorStatus (str, Enum):
-     """
-     Predictor status.
-     """
-     Compiling = "COMPILING"
-     Active = "ACTIVE"
-     Invalid = "INVALID"
-     Archived = "ARCHIVED"
+ PredictorStatus = Literal["compiling", "active", "archived"]

  class EnumerationMember (BaseModel):
      """
@@ -79,7 +66,7 @@ class Predictor (BaseModel):
          owner (User): Predictor owner.
          name (str): Predictor name.
          status (PredictorStatus): Predictor status.
-         access (AccessMode): Predictor access.
+         access (PredictorAccess): Predictor access.
          signature (Signature): Predictor signature.
          created (str): Date created.
          description (str): Predictor description.
@@ -91,7 +78,7 @@ class Predictor (BaseModel):
      owner: User = Field(description="Predictor owner.")
      name: str = Field(description="Predictor name.")
      status: PredictorStatus = Field(description="Predictor status.")
-     access: AccessMode = Field(description="Predictor access.")
+     access: PredictorAccess = Field(description="Predictor access.")
      signature: Signature = Field(description="Predictor signature.")
      created: str = Field(description="Date created.")
      description: str | None = Field(default=None, description="Predictor description.")
fxn/version.py CHANGED
@@ -3,4 +3,4 @@
  # Copyright © 2025 NatML Inc. All Rights Reserved.
  #

- __version__ = "0.0.53"
+ __version__ = "0.0.55"
fxn-0.0.53.dist-info/METADATA → fxn-0.0.55.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: fxn
- Version: 0.0.53
+ Version: 0.0.55
  Summary: Run prediction functions locally in Python. Register at https://fxn.ai.
  Author-email: "NatML Inc." <hi@fxn.ai>
  License: Apache License
fxn-0.0.55.dist-info/RECORD ADDED
@@ -0,0 +1,55 @@
+ fxn/__init__.py,sha256=gnJK7iOmMVWFhluW9bOvTNxJbpT-GwzDJTMmjA_XxOE,284
+ fxn/client.py,sha256=Deje8eiS1VOHX85tQnV34viv2CPVx2ljwHSbyVB5Z1o,3790
+ fxn/compile.py,sha256=R2TchZ33Eq5tqiHmXlQybCEEbnM9DghNHVQfLasfDv4,4456
+ fxn/function.py,sha256=XeEuALkbVhkvwEBUfP0A2fu3tdimwHemoR17oomhzc8,1407
+ fxn/logging.py,sha256=MsTSf0GZxrHNDwVAXDOh8_zRUg9hkeZ8DfhFUJs7D8A,7250
+ fxn/sandbox.py,sha256=31jDvp55T-Bp9mm1UnoWC1Da9YtarGobR6U8mr13Amw,7637
+ fxn/version.py,sha256=4RfcRHIykQUZsWX7fKiagbG2lLLV7kfJQBBZX6jfKSw,95
+ fxn/beta/__init__.py,sha256=5X-P6A9JPNHS2tIuo2Eiu__X2Rv-FhjvECtyCq8DStc,509
+ fxn/beta/client.py,sha256=s0BpkQM4V_816pyzB8sbo-QQg0S7tY0APTpYACWsxQM,2590
+ fxn/beta/metadata.py,sha256=vVCjCEFNGvXcct-3kwT7RmyOGGscjh_oEVWkxIMGWrw,10894
+ fxn/beta/cli/__init__.py,sha256=6eyBhW8l3tLWiC36vx5BCtG3taNVvUkPuGLuEyhwnWQ,136
+ fxn/beta/cli/llm.py,sha256=loL87unr1o_TfsaBTQOb3d7CEpm1Qcf5WJa-qv51iXE,517
+ fxn/beta/cli/mcp.py,sha256=U6W03-j_6tKbibv4fcomEeBRVcl_srNy7XjFE9KvvAk,373
+ fxn/beta/llm/__init__.py,sha256=bJB5i1eY8zXnixVc34iubxnEXTIoz1UtnoGI5hXDg18,73
+ fxn/beta/llm/server.py,sha256=bJB5i1eY8zXnixVc34iubxnEXTIoz1UtnoGI5hXDg18,73
+ fxn/beta/services/__init__.py,sha256=6XNWEcXXbFX2O_P-rpR0xMuAp9gN7Q0xuxzp7BGt8Xc,153
+ fxn/beta/services/prediction.py,sha256=VbZIY292rIP6CHzR4GlL4DvFkAymM4qEQW2_ii2R1-k,357
+ fxn/beta/services/remote.py,sha256=jQcYxOUFp3xnnLljPK5TYNEWpmDCwv0vjM0oQkK8h54,7544
+ fxn/c/__init__.py,sha256=NMIduqO_MYtI9jVCu6ZxvbBtYQXoQyNEWblNy3m2UPY,313
+ fxn/c/configuration.py,sha256=56_-NNT4yoHDNfvB6jJNYF2eKJYMRLVrv3mIg7g6qaE,5597
+ fxn/c/fxnc.py,sha256=YrvwOlzPmTlSDuz2zmKZfws2WK5BY4YZ62edoplcMJU,1381
+ fxn/c/map.py,sha256=47fBJ0Q6uB_xeW3sn9aCLYJ539edg8ff9DU-EIfWRGA,2352
+ fxn/c/prediction.py,sha256=-d-5yreFAaRS-nDHzhfabRNtgYcmJGiY_N2dt09gk84,2689
+ fxn/c/predictor.py,sha256=48poLj1AthzCgU9n6Wv9gL8o4gFucIlOnBO2wdor6r0,1925
+ fxn/c/stream.py,sha256=Y1Xv1Bt3_qlnWg9rCn7NWESpouF1eKMzDiQjhZWbXTg,1105
+ fxn/c/value.py,sha256=h5n91nm8C3YvEEFORfJBUdncZ29DFIdUKGWQ_KpLsWc,7420
+ fxn/cli/__init__.py,sha256=IBp79GveNu_eEO5tdiapBkjAlETPT774CM7ZeLR5LiQ,2344
+ fxn/cli/auth.py,sha256=B4ORnTfMzeCdfbZRY-6fqYbgr5glUczlmcq3qcT2Euw,1683
+ fxn/cli/compile.py,sha256=CJXVFI2wg6f3xM-NmAYqCvtWfT_2OddWAS2rMRZLphM,7013
+ fxn/cli/misc.py,sha256=Kiw6C5j7oAopkM2nCRYln66CXR40-FGHozFJo6pRq14,839
+ fxn/cli/predictions.py,sha256=O1zUX_AnFt3Hwh5rAN_RtOs94_FCr8r2tPZi7IszPng,3056
+ fxn/cli/predictors.py,sha256=boDOBD4oEDmoN30KHxE7VNv7yCFTWjNtLR0pveWFWYE,1542
+ fxn/cli/sources.py,sha256=ejzLQ-gh4rn1LLDwFhh_FcJ-2z69k9npKnO1B-PGRI0,1869
+ fxn/lib/__init__.py,sha256=-w1ikmmki5NMpzJjERW-O4SwOfBNkimej_0jL8ujYRk,71
+ fxn/lib/linux/arm64/libFunction.so,sha256=1OnIZwh3g_9gIajWWwI6lDnmPKKxGDyLCGtqL-lnMJk,211736
+ fxn/lib/linux/x86_64/libFunction.so,sha256=J4VT3WhNEFEVlqcC-s-96sPPQp9fFvWQ7PykKxwuW6o,236336
+ fxn/lib/macos/arm64/Function.dylib,sha256=pO6mtYlJe8MbuXSyyUH20H3DPIM6F7y9U9E82mC3XmE,247376
+ fxn/lib/macos/x86_64/Function.dylib,sha256=5UFrdoIo3KA_rFYv9RTjCsw50LnNbH_r8jq73f_pxcE,255616
+ fxn/lib/windows/arm64/Function.dll,sha256=K3M_vcDWviszJVx5Io-aEa-_1bhBXZN8FLXn3a1f4_A,403456
+ fxn/lib/windows/x86_64/Function.dll,sha256=pMgipA9iXnGP6ENXwuNWDXlW-HclszuyDk-HSyVQ4Rs,442368
+ fxn/services/__init__.py,sha256=Bif8IttwJ089mSRsd3MFdob7z2eF-MKigKu4ZQFZBCQ,190
+ fxn/services/prediction.py,sha256=IZFOKFFMUTe4KlyzdrWLT6T0XIX0FlBHvGQnb6vaIBo,10420
+ fxn/services/predictor.py,sha256=Wl_7YKiD5mTpC5x2Zaq4BpatRjwRUX8Th9GIrwd38MA,791
+ fxn/services/user.py,sha256=ADl5MFLsk4K0altgKHnI-i64E3g1wU3e56Noq_ciRuk,685
+ fxn/types/__init__.py,sha256=jiGcZo1RyRg1wJiEulHjVU36EI7TS3sgE4TCeAeMNfk,297
+ fxn/types/dtype.py,sha256=71Tuu4IydmELcBcSBbmWswhCE-7WqBSQ4VkETsFRzjA,617
+ fxn/types/prediction.py,sha256=BdLTxnKiSFbz5warX8g_Z4DedNxXK3gaNjSKR2FP8tA,2051
+ fxn/types/predictor.py,sha256=lxFPpo3hP38LfJYYQ703RehjQvnPsVZyptBX2oaxQ5A,3825
+ fxn/types/user.py,sha256=Z44TwEocyxSrfKyzcNfmAXUrpX_Ry8fJ7MffSxRn4oU,1071
+ fxn-0.0.55.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+ fxn-0.0.55.dist-info/METADATA,sha256=Hrrtt5i0YMMrNriddOV-rZ3RV4E2yTvpTP1PxzcAbOY,16136
+ fxn-0.0.55.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ fxn-0.0.55.dist-info/entry_points.txt,sha256=O_AwD5dYaeB-YT1F9hPAPuDYCkw_W0tdNGYbc5RVR2k,45
+ fxn-0.0.55.dist-info/top_level.txt,sha256=1ULIEGrnMlhId8nYAkjmRn9g3KEFuHKboq193SEKQkA,4
+ fxn-0.0.55.dist-info/RECORD,,
fxn-0.0.53.dist-info/WHEEL → fxn-0.0.55.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.8.0)
+ Generator: setuptools (80.9.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

fxn-0.0.53.dist-info/RECORD DELETED
@@ -1,54 +0,0 @@
- fxn/__init__.py,sha256=gnJK7iOmMVWFhluW9bOvTNxJbpT-GwzDJTMmjA_XxOE,284
- fxn/client.py,sha256=Deje8eiS1VOHX85tQnV34viv2CPVx2ljwHSbyVB5Z1o,3790
- fxn/compile.py,sha256=tKLRdbFPf0c3Q7UMtKa1Wbpf4Vx1XxbMzh3ltfVb_eo,4371
- fxn/function.py,sha256=XeEuALkbVhkvwEBUfP0A2fu3tdimwHemoR17oomhzc8,1407
- fxn/logging.py,sha256=MsTSf0GZxrHNDwVAXDOh8_zRUg9hkeZ8DfhFUJs7D8A,7250
- fxn/sandbox.py,sha256=50yY2GDdkAFl-6pXTleaD1LXYM6-pJ3C1epKsr0xdrM,7313
- fxn/version.py,sha256=n-WFHj2EhODMEevfcqzzFWZYnuAaaKG2T-dxo23t23U,95
- fxn/beta/__init__.py,sha256=h5PwE5PtYu9BgdysuAG51KMJ2N_clwixufXgTzC0dTg,464
- fxn/beta/client.py,sha256=s0BpkQM4V_816pyzB8sbo-QQg0S7tY0APTpYACWsxQM,2590
- fxn/beta/metadata.py,sha256=Z3bJwVd-8GeaAly1LgZE3Yej7y7sQUu_IY2qY2ISYFk,6935
- fxn/beta/cli/__init__.py,sha256=_X_lreE4q_CY8AzRmcFzRI1OIV8x1xrEyOqAV7fsQlk,104
- fxn/beta/cli/llm.py,sha256=loL87unr1o_TfsaBTQOb3d7CEpm1Qcf5WJa-qv51iXE,517
- fxn/beta/llm/__init__.py,sha256=bJB5i1eY8zXnixVc34iubxnEXTIoz1UtnoGI5hXDg18,73
- fxn/beta/llm/server.py,sha256=bJB5i1eY8zXnixVc34iubxnEXTIoz1UtnoGI5hXDg18,73
- fxn/beta/services/__init__.py,sha256=6XNWEcXXbFX2O_P-rpR0xMuAp9gN7Q0xuxzp7BGt8Xc,153
- fxn/beta/services/prediction.py,sha256=VbZIY292rIP6CHzR4GlL4DvFkAymM4qEQW2_ii2R1-k,357
- fxn/beta/services/remote.py,sha256=jQcYxOUFp3xnnLljPK5TYNEWpmDCwv0vjM0oQkK8h54,7544
- fxn/c/__init__.py,sha256=NMIduqO_MYtI9jVCu6ZxvbBtYQXoQyNEWblNy3m2UPY,313
- fxn/c/configuration.py,sha256=56_-NNT4yoHDNfvB6jJNYF2eKJYMRLVrv3mIg7g6qaE,5597
- fxn/c/fxnc.py,sha256=YrvwOlzPmTlSDuz2zmKZfws2WK5BY4YZ62edoplcMJU,1381
- fxn/c/map.py,sha256=47fBJ0Q6uB_xeW3sn9aCLYJ539edg8ff9DU-EIfWRGA,2352
- fxn/c/prediction.py,sha256=-d-5yreFAaRS-nDHzhfabRNtgYcmJGiY_N2dt09gk84,2689
- fxn/c/predictor.py,sha256=48poLj1AthzCgU9n6Wv9gL8o4gFucIlOnBO2wdor6r0,1925
- fxn/c/stream.py,sha256=Y1Xv1Bt3_qlnWg9rCn7NWESpouF1eKMzDiQjhZWbXTg,1105
- fxn/c/value.py,sha256=h5n91nm8C3YvEEFORfJBUdncZ29DFIdUKGWQ_KpLsWc,7420
- fxn/cli/__init__.py,sha256=gMn7pj8287M8KhB0cStQOcgo5fGGqKsR4i3nKAJQGow,1671
- fxn/cli/auth.py,sha256=6iGbNbjxfCr8OZT3_neLThXdWeKRBZATwru8vU0XmRw,1688
- fxn/cli/compile.py,sha256=BSUBUiXhI7vDfztHCGgmA4Dsvco7J9i1aJXK9EQBKHc,6168
- fxn/cli/misc.py,sha256=LcJbCj_GAgtGraTRva2zHHOPpNwI6SOFntRksxwlqvM,843
- fxn/cli/predictions.py,sha256=ma7wbsKD5CFCRTU_TtJ8N0nN1fgFX2BZPGG8qm8HlNI,3182
- fxn/cli/predictors.py,sha256=bVQAuBue_Jxb79X85RTCzOerWRRT2Ny1oF5DNYAsx4M,1545
- fxn/cli/sources.py,sha256=HQ_PBLXY2CZ5tGuuqQeJQTpM9S9rKtBzyNVTK-ywG84,1781
- fxn/lib/__init__.py,sha256=-w1ikmmki5NMpzJjERW-O4SwOfBNkimej_0jL8ujYRk,71
- fxn/lib/linux/arm64/libFunction.so,sha256=T9HE_dkC4yKXsyAc_sa7iWTXRDcwGOjZ1MAi1j9-ZCw,211736
- fxn/lib/linux/x86_64/libFunction.so,sha256=66r5ZzUMlSjIfqfIwbhwCJk1AQF2iBL5OZzeT2ibZDQ,236336
- fxn/lib/macos/arm64/Function.dylib,sha256=NO5ZzDvMFsDkgYNL8louztyaRxuM9Hl_BosQYFi6rKg,263856
- fxn/lib/macos/x86_64/Function.dylib,sha256=SO55PHLhhl8sh_Gr3IKgTHPV1-pnhLb30qbqHKF0_1M,255600
- fxn/lib/windows/arm64/Function.dll,sha256=ol6LyOVtF7tq-hnPLS9RRXAkobYMSC9T_JF1kx3l2IY,411136
- fxn/lib/windows/x86_64/Function.dll,sha256=P43RXFNAdjTrDEnynGRy8CjCgY1KWzB_1Cz5W6394bg,447488
- fxn/services/__init__.py,sha256=Bif8IttwJ089mSRsd3MFdob7z2eF-MKigKu4ZQFZBCQ,190
- fxn/services/prediction.py,sha256=2BNwzl4K7-7AXyZFE5TanIYWXJ4M4WVPWCCBbqqBC3M,10029
- fxn/services/predictor.py,sha256=Wl_7YKiD5mTpC5x2Zaq4BpatRjwRUX8Th9GIrwd38MA,791
- fxn/services/user.py,sha256=ADl5MFLsk4K0altgKHnI-i64E3g1wU3e56Noq_ciRuk,685
- fxn/types/__init__.py,sha256=MEg71rzbGgoWfgB4Yi5QvxbnovHTZRIzCUZLtWtWP1E,292
- fxn/types/dtype.py,sha256=71Tuu4IydmELcBcSBbmWswhCE-7WqBSQ4VkETsFRzjA,617
- fxn/types/prediction.py,sha256=BdLTxnKiSFbz5warX8g_Z4DedNxXK3gaNjSKR2FP8tA,2051
- fxn/types/predictor.py,sha256=KRGZEuDt7WPMCyRcZvQq4y2FMocfVrLEUNJCJgfDY9Y,4000
- fxn/types/user.py,sha256=Z44TwEocyxSrfKyzcNfmAXUrpX_Ry8fJ7MffSxRn4oU,1071
- fxn-0.0.53.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
- fxn-0.0.53.dist-info/METADATA,sha256=p4nALBoGifSFNlFX4iFqxiGEGu5nR--cbZjkuhMpSq8,16136
- fxn-0.0.53.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
- fxn-0.0.53.dist-info/entry_points.txt,sha256=O_AwD5dYaeB-YT1F9hPAPuDYCkw_W0tdNGYbc5RVR2k,45
- fxn-0.0.53.dist-info/top_level.txt,sha256=1ULIEGrnMlhId8nYAkjmRn9g3KEFuHKboq193SEKQkA,4
- fxn-0.0.53.dist-info/RECORD,,