fxn 0.0.51-py3-none-any.whl → 0.0.53-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fxn/beta/__init__.py CHANGED
@@ -5,6 +5,9 @@

 from .metadata import (
     CoreMLInferenceMetadata, LiteRTInferenceMetadata, LlamaCppInferenceMetadata,
+    OnnxInferenceMetadata, OnnxRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
+    QnnInferenceMetadata, QnnInferenceBackend, QnnInferenceQuantization,
+    # Deprecated
     ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata
 )
-from .remote import RemoteAcceleration
+from .services import RemoteAcceleration
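Note: the old `ONNX*` names remain importable as plain aliases of the renamed `Onnx*` classes (see the `# DEPRECATED` block in fxn/beta/metadata.py below). A minimal sketch of what this means for downstream code, assuming fxn 0.0.53 is installed:

    from fxn.beta import OnnxInferenceMetadata, ONNXInferenceMetadata

    # The deprecated name is a module-level alias, so identity (not just equality) holds.
    assert ONNXInferenceMetadata is OnnxInferenceMetadata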
fxn/beta/cli/__init__.py ADDED
@@ -0,0 +1,6 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+
+from .llm import app as llm_app
fxn/beta/cli/llm.py ADDED
@@ -0,0 +1,22 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+
+from pathlib import Path
+from typer import Argument, Option, Typer
+from typing_extensions import Annotated
+
+app = Typer(no_args_is_help=True)
+
+@app.command(name="chat", help="Start a chat session.")
+def chat (
+    model: Annotated[str, Argument(help="Model to chat with.")]
+):
+    pass
+
+@app.command(name="serve", help="Start an LLM server.")
+def serve (
+    port: Annotated[int, Option(help="Port to start the server on.")] = 11435
+):
+    pass
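Note: both `chat` and `serve` are stubs in this release (their bodies are `pass`), but the Typer app is importable and gets registered as the hidden `fxn llm` subcommand (see fxn/cli/__init__.py below). A hedged sketch of exercising the stub with Typer's test runner; the model tag `@example/llama` is a hypothetical placeholder:

    from typer.testing import CliRunner
    from fxn.beta.cli.llm import app

    runner = CliRunner()
    # The stub command parses its argument and exits 0 without doing any work yet.
    result = runner.invoke(app, ["chat", "@example/llama"])
    assert result.exit_code == 0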
fxn/beta/client.py CHANGED
@@ -10,8 +10,7 @@ from typing import get_origin, Callable, Generator, Iterator, TypeVar
 from ..client import FunctionClient
 from ..services import PredictionService as EdgePredictionService
 from ..types import Acceleration
-from .prediction import PredictionService
-from .remote import RemoteAcceleration
+from .services import PredictionService, RemoteAcceleration

 F = TypeVar("F", bound=Callable[..., object])

fxn/beta/llm/__init__.py ADDED
@@ -0,0 +1,5 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+
fxn/beta/llm/server.py ADDED
@@ -0,0 +1,5 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+
fxn/beta/metadata.py CHANGED
@@ -3,6 +3,7 @@
 # Copyright © 2025 NatML Inc. All Rights Reserved.
 #

+from os import PathLike
 from pathlib import Path
 from pydantic import BaseModel, BeforeValidator, ConfigDict, Field
 from typing import Annotated, Literal
@@ -28,37 +29,130 @@ def _validate_ort_inference_session (session: "onnxruntime.InferenceSession") ->
 class CoreMLInferenceMetadata (BaseModel):
     """
     Metadata required to lower a PyTorch model for inference on iOS, macOS, and visionOS with CoreML.
+
+    Members:
+        model (torch.nn.Module): PyTorch module to apply metadata to.
+        model_args (tuple[Tensor,...]): Positional inputs to the model.
     """
     kind: Literal["meta.inference.coreml"] = "meta.inference.coreml"
-    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
-    model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+        description="PyTorch module to apply metadata to.",
+        exclude=True
+    )
+    model_args: list[object] = Field(
+        description="Positional inputs to the model.",
+        exclude=True
+    )
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)

-class ONNXInferenceMetadata (BaseModel):
+class OnnxInferenceMetadata (BaseModel):
     """
     Metadata required to lower a PyTorch model for inference.
+
+    Members:
+        model (torch.nn.Module): PyTorch module to apply metadata to.
+        model_args (tuple[Tensor,...]): Positional inputs to the model.
     """
     kind: Literal["meta.inference.onnx"] = "meta.inference.onnx"
-    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
-    model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+        description="PyTorch module to apply metadata to.",
+        exclude=True
+    )
+    model_args: list[object] = Field(
+        description="Positional inputs to the model.",
+        exclude=True
+    )
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)

-class ONNXRuntimeInferenceSessionMetadata (BaseModel):
+class OnnxRuntimeInferenceSessionMetadata (BaseModel):
     """
     Metadata required to lower an ONNXRuntime `InferenceSession` for inference.
+
+    Members:
+        session (onnxruntime.InferenceSession): ONNXRuntime inference session to apply metadata to.
+        model_path (str | Path): ONNX model path. The model must exist at this path in the compiler sandbox.
     """
     kind: Literal["meta.inference.onnxruntime"] = "meta.inference.onnxruntime"
-    session: Annotated[object, BeforeValidator(_validate_ort_inference_session)] = Field(description="ONNXRuntime inference session to apply metadata to.", exclude=True)
-    model_path: Path = Field(description="ONNX model path. The model must exist at this path in the compiler sandbox.", exclude=True)
+    session: Annotated[object, BeforeValidator(_validate_ort_inference_session)] = Field(
+        description="ONNXRuntime inference session to apply metadata to.",
+        exclude=True
+    )
+    model_path: str | Path = Field(
+        description="ONNX model path. The model must exist at this path in the compiler sandbox.",
+        exclude=True
+    )
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)

 class LiteRTInferenceMetadata (BaseModel):
     """
     Metadata required to lower PyTorch model for inference with LiteRT (fka TensorFlow Lite).
+
+    Members:
+        model (torch.nn.Module): PyTorch module to apply metadata to.
+        model_args (tuple[Tensor,...]): Positional inputs to the model.
     """
     kind: Literal["meta.inference.litert"] = "meta.inference.litert"
-    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(description="PyTorch module to apply metadata to.", exclude=True)
-    model_args: list[object] = Field(description="Positional inputs to the model.", exclude=True)
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+        description="PyTorch module to apply metadata to.",
+        exclude=True
+    )
+    model_args: list[object] = Field(
+        description="Positional inputs to the model.",
+        exclude=True
+    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
+
+class OpenVINOInferenceMetadata (BaseModel):
+    """
+    Metadata required to lower a PyTorch model for inference with Intel OpenVINO.
+
+    Members:
+        model (torch.nn.Module): PyTorch module to apply metadata to.
+        model_args (tuple[Tensor,...]): Positional inputs to the model.
+    """
+    kind: Literal["meta.inference.openvino"] = "meta.inference.openvino"
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+        description="PyTorch module to apply metadata to.",
+        exclude=True
+    )
+    model_args: list[object] = Field(
+        description="Positional inputs to the model.",
+        exclude=True
+    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
+
+QnnInferenceBackend = Literal["cpu", "gpu"] # `htp` coming soon
+QnnInferenceQuantization = Literal["w8a8", "w8a16", "w4a8", "w4a16"]
+
+class QnnInferenceMetadata (BaseModel):
+    """
+    Metadata required to lower a PyTorch model for inference on Qualcomm accelerators with QNN SDK.
+
+    Members:
+        model (torch.nn.Module): PyTorch module to apply metadata to.
+        model_args (tuple[Tensor,...]): Positional inputs to the model.
+        backend (QnnInferenceBackend): QNN inference backend. Defaults to `cpu`.
+        quantization (QnnInferenceQuantization): QNN model quantization mode. This MUST only be specified when backend is `htp`.
+    """
+    kind: Literal["meta.inference.qnn"] = "meta.inference.qnn"
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+        description="PyTorch module to apply metadata to.",
+        exclude=True
+    )
+    model_args: list[object] = Field(
+        description="Positional inputs to the model.",
+        exclude=True
+    )
+    backend: QnnInferenceBackend = Field(
+        default="cpu",
+        description="QNN backend to execute the model.",
+        exclude=True
+    )
+    quantization: QnnInferenceQuantization | None = Field(
+        default=None,
+        description="QNN model quantization mode. This MUST only be specified when backend is `htp`.",
+        exclude=True
+    )
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)

 class LlamaCppInferenceMetadata (BaseModel): # INCOMPLETE
@@ -66,5 +160,12 @@ class LlamaCppInferenceMetadata (BaseModel): # INCOMPLETE
     Metadata required to lower a GGUF model for LLM inference.
     """
     kind: Literal["meta.inference.gguf"] = "meta.inference.gguf"
-    model_path: Path = Field(description="GGUF model path. The model must exist at this path in the compiler sandbox.", exclude=True)
-    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
+    model_path: Path = Field(
+        description="GGUF model path. The model must exist at this path in the compiler sandbox.",
+        exclude=True
+    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
+
+# DEPRECATED
+ONNXInferenceMetadata = OnnxInferenceMetadata
+ONNXRuntimeInferenceSessionMetadata = OnnxRuntimeInferenceSessionMetadata
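Note: the new metadata classes attach to a PyTorch module the same way the existing ones do. A hedged sketch of constructing `QnnInferenceMetadata`, assuming `torch` is installed; the module and input are placeholders:

    import torch
    from fxn.beta import QnnInferenceMetadata

    model = torch.nn.Linear(4, 2)        # placeholder module
    example_input = torch.randn(1, 4)    # placeholder input

    # `quantization` stays unset: per the docstring it MUST only be specified
    # when the backend is `htp`, which is not available yet.
    metadata = QnnInferenceMetadata(
        model=model,
        model_args=[example_input],
        backend="gpu"
    )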
fxn/beta/services/__init__.py ADDED
@@ -0,0 +1,7 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+
+from .prediction import PredictionService
+from .remote import RemoteAcceleration
fxn/beta/prediction.py → fxn/beta/services/prediction.py RENAMED
@@ -3,7 +3,7 @@
 # Copyright © 2025 NatML Inc. All Rights Reserved.
 #

-from ..client import FunctionClient
+from ...client import FunctionClient
 from .remote import RemotePredictionService

 class PredictionService:
fxn/beta/remote.py → fxn/beta/services/remote.py RENAMED
@@ -15,10 +15,10 @@ from requests import get, put
 from typing import Literal
 from urllib.request import urlopen

-from ..c import Configuration
-from ..client import FunctionClient
-from ..services import Value
-from ..types import Dtype, Prediction
+from ...c import Configuration
+from ...client import FunctionClient
+from ...services.prediction import Value
+from ...types import Dtype, Prediction

 RemoteAcceleration = Literal["auto", "cpu", "a40", "a100"]
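Note: `RemoteAcceleration` is a plain `Literal` type, now re-exported from `fxn.beta`. A small hedged sketch of using it as an annotation; the selection policy is illustrative, not part of the package:

    from fxn.beta import RemoteAcceleration

    def pick_accelerator(large_model: bool) -> RemoteAcceleration:
        # Illustrative policy: big models go to an A100, everything else to CPU.
        return "a100" if large_model else "cpu"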
 
fxn/cli/__init__.py CHANGED
@@ -14,6 +14,7 @@ from .misc import cli_options
 from .predictions import create_prediction
 from .predictors import archive_predictor, delete_predictor, retrieve_predictor
 from .sources import retrieve_source
+from ..beta.cli import llm_app

 # Define CLI
 typer.main.console_stderr = TracebackMarkupConsole()
@@ -30,6 +31,7 @@ app.callback()(cli_options)

 # Add subcommands
 app.add_typer(auth_app, name="auth", help="Login, logout, and check your authentication status.")
+app.add_typer(llm_app, name="llm", hidden=True, help="Work with large language models (LLMs).")

 # Add top-level commands
 app.command(
@@ -44,7 +46,7 @@ app.command(
 app.command(name="retrieve", help="Retrieve a predictor.")(retrieve_predictor)
 app.command(name="archive", help="Archive a predictor.")(archive_predictor)
 app.command(name="delete", help="Delete a predictor.")(delete_predictor)
-app.command(name="source", help="Retrieve the native source code for a given prediction.")(retrieve_source)
+app.command(name="source", help="Retrieve the generated native code for a given predictor.")(retrieve_source)

 # Run
 if __name__ == "__main__":
fxn/cli/compile.py CHANGED
@@ -86,6 +86,8 @@ def _load_predictor_func (path: str) -> Callable[...,object]:
     if "" not in sys.path:
         sys.path.insert(0, "")
     path: Path = Path(path).resolve()
+    if not path.exists():
+        raise ValueError(f"Cannot compile predictor because no Python module exists at the given path.")
     sys.path.insert(0, str(path.parent))
     name = getmodulename(path)
     spec = spec_from_file_location(name, path)
fxn/compile.py CHANGED
@@ -9,25 +9,39 @@ from inspect import isasyncgenfunction, iscoroutinefunction
 from pathlib import Path
 from pydantic import BaseModel, ConfigDict, Field
 from types import ModuleType
-from typing import Literal
+from typing import Any, Callable, Literal, ParamSpec, TypeVar, cast

 from .beta import (
     CoreMLInferenceMetadata, LiteRTInferenceMetadata, LlamaCppInferenceMetadata,
-    ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata
+    OnnxInferenceMetadata, OnnxRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
+    QnnInferenceMetadata
 )
 from .sandbox import Sandbox
 from .types import AccessMode

-CompileTarget = Literal["android", "ios", "linux", "macos", "visionos", "wasm", "windows"]
+CompileTarget = Literal[
+    "android",
+    "ios",
+    "linux",
+    "macos",
+    "visionos",
+    "wasm",
+    "windows"
+]

 CompileMetadata = (
     CoreMLInferenceMetadata |
     LiteRTInferenceMetadata |
     LlamaCppInferenceMetadata |
-    ONNXInferenceMetadata |
-    ONNXRuntimeInferenceSessionMetadata
+    OnnxInferenceMetadata |
+    OnnxRuntimeInferenceSessionMetadata |
+    OpenVINOInferenceMetadata |
+    QnnInferenceMetadata
 )

+P = ParamSpec("P")
+R = TypeVar("R")
+
 class PredictorSpec (BaseModel):
     """
     Descriptor of a predictor to be compiled.
@@ -56,7 +70,7 @@ def compile (
     media: Path=None,
     license: str=None,
     **kwargs
-):
+) -> Callable[[Callable[P, R]], Callable[P, R]]:
     """
     Create a predictor by compiling a stateless function.

@@ -97,5 +111,5 @@ def compile (
         def wrapper (*args, **kwargs):
             return func(*args, **kwargs)
         wrapper.__predictor_spec = spec
-        return wrapper
+        return cast(Callable[P, R], wrapper)
     return decorator
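Note: the `ParamSpec`/`cast` change means the decorator now tells type checkers that it returns a callable with the same signature it received. A minimal sketch of the pattern in isolation, independent of the fxn API (Python 3.10+):

    from typing import Callable, ParamSpec, TypeVar, cast

    P = ParamSpec("P")
    R = TypeVar("R")

    def passthrough(func: Callable[P, R]) -> Callable[P, R]:
        def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
            return func(*args, **kwargs)
        # The cast keeps type checkers seeing the original signature.
        return cast(Callable[P, R], wrapper)

    @passthrough
    def add(a: int, b: int) -> int:
        return a + b

    add(1, 2)  # type-checks with the original signature preserved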
fxn/services/prediction.py CHANGED
@@ -43,18 +43,6 @@ class PredictionService:
         self.__cache_dir = self.__class__.__get_home_dir() / ".fxn" / "cache"
         self.__cache_dir.mkdir(parents=True, exist_ok=True)

-    def ready (self, tag: str, **kwargs) -> bool:
-        """
-        Check whether a predictor has been preloaded and is ready to make predictions.
-
-        Parameters:
-            tag (str): Predictor tag.
-
-        Returns:
-            bool: Whether the predictor is ready to make predictions.
-        """
-        return tag in self.__cache
-
     def create (
         self,
         tag: str,
fxn/version.py CHANGED
@@ -3,4 +3,4 @@
 # Copyright © 2025 NatML Inc. All Rights Reserved.
 #

-__version__ = "0.0.51"
+__version__ = "0.0.53"
fxn-0.0.51.dist-info/METADATA → fxn-0.0.53.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fxn
-Version: 0.0.51
+Version: 0.0.53
 Summary: Run prediction functions locally in Python. Register at https://fxn.ai.
 Author-email: "NatML Inc." <hi@fxn.ai>
 License: Apache License
fxn-0.0.51.dist-info/RECORD → fxn-0.0.53.dist-info/RECORD RENAMED
@@ -1,15 +1,20 @@
 fxn/__init__.py,sha256=gnJK7iOmMVWFhluW9bOvTNxJbpT-GwzDJTMmjA_XxOE,284
 fxn/client.py,sha256=Deje8eiS1VOHX85tQnV34viv2CPVx2ljwHSbyVB5Z1o,3790
-fxn/compile.py,sha256=Iw6JyhpmL3dZEI7PkVb5IpTeq3YhdUNjxgYzU99kbK0,4073
+fxn/compile.py,sha256=tKLRdbFPf0c3Q7UMtKa1Wbpf4Vx1XxbMzh3ltfVb_eo,4371
 fxn/function.py,sha256=XeEuALkbVhkvwEBUfP0A2fu3tdimwHemoR17oomhzc8,1407
 fxn/logging.py,sha256=MsTSf0GZxrHNDwVAXDOh8_zRUg9hkeZ8DfhFUJs7D8A,7250
 fxn/sandbox.py,sha256=50yY2GDdkAFl-6pXTleaD1LXYM6-pJ3C1epKsr0xdrM,7313
-fxn/version.py,sha256=2zCxqyFmURc8C37OeDuXXS3OeTDER7TROCbHoE5sCLU,95
-fxn/beta/__init__.py,sha256=5V58p4doyZOvmAETCxrGUu4hG04tNq8ejlfSxnEYHxE,281
-fxn/beta/client.py,sha256=0lfwQPcB9ToIJC7AcCXO6DlJKkmId8EChhd9bk29GGE,2611
-fxn/beta/metadata.py,sha256=1Jo7jjMkhdBaIw5IyiDJ32kukJbym-el3T328GBanW8,3713
-fxn/beta/prediction.py,sha256=9DTBahNF6m0TicLab2o9e8IKpiSV6K7cUSTYaFju0ZU,356
-fxn/beta/remote.py,sha256=psPNcGFQKMGHAJG4NRDIQ2trStvTj2NOZeQqVI-e29Q,7529
+fxn/version.py,sha256=n-WFHj2EhODMEevfcqzzFWZYnuAaaKG2T-dxo23t23U,95
+fxn/beta/__init__.py,sha256=h5PwE5PtYu9BgdysuAG51KMJ2N_clwixufXgTzC0dTg,464
+fxn/beta/client.py,sha256=s0BpkQM4V_816pyzB8sbo-QQg0S7tY0APTpYACWsxQM,2590
+fxn/beta/metadata.py,sha256=Z3bJwVd-8GeaAly1LgZE3Yej7y7sQUu_IY2qY2ISYFk,6935
+fxn/beta/cli/__init__.py,sha256=_X_lreE4q_CY8AzRmcFzRI1OIV8x1xrEyOqAV7fsQlk,104
+fxn/beta/cli/llm.py,sha256=loL87unr1o_TfsaBTQOb3d7CEpm1Qcf5WJa-qv51iXE,517
+fxn/beta/llm/__init__.py,sha256=bJB5i1eY8zXnixVc34iubxnEXTIoz1UtnoGI5hXDg18,73
+fxn/beta/llm/server.py,sha256=bJB5i1eY8zXnixVc34iubxnEXTIoz1UtnoGI5hXDg18,73
+fxn/beta/services/__init__.py,sha256=6XNWEcXXbFX2O_P-rpR0xMuAp9gN7Q0xuxzp7BGt8Xc,153
+fxn/beta/services/prediction.py,sha256=VbZIY292rIP6CHzR4GlL4DvFkAymM4qEQW2_ii2R1-k,357
+fxn/beta/services/remote.py,sha256=jQcYxOUFp3xnnLljPK5TYNEWpmDCwv0vjM0oQkK8h54,7544
 fxn/c/__init__.py,sha256=NMIduqO_MYtI9jVCu6ZxvbBtYQXoQyNEWblNy3m2UPY,313
 fxn/c/configuration.py,sha256=56_-NNT4yoHDNfvB6jJNYF2eKJYMRLVrv3mIg7g6qaE,5597
 fxn/c/fxnc.py,sha256=YrvwOlzPmTlSDuz2zmKZfws2WK5BY4YZ62edoplcMJU,1381
@@ -18,9 +23,9 @@ fxn/c/prediction.py,sha256=-d-5yreFAaRS-nDHzhfabRNtgYcmJGiY_N2dt09gk84,2689
 fxn/c/predictor.py,sha256=48poLj1AthzCgU9n6Wv9gL8o4gFucIlOnBO2wdor6r0,1925
 fxn/c/stream.py,sha256=Y1Xv1Bt3_qlnWg9rCn7NWESpouF1eKMzDiQjhZWbXTg,1105
 fxn/c/value.py,sha256=h5n91nm8C3YvEEFORfJBUdncZ29DFIdUKGWQ_KpLsWc,7420
-fxn/cli/__init__.py,sha256=vLCNLiXneZzMCFViDOngg3kQZ1rZwnZXzUSEOB38Il0,1542
+fxn/cli/__init__.py,sha256=gMn7pj8287M8KhB0cStQOcgo5fGGqKsR4i3nKAJQGow,1671
 fxn/cli/auth.py,sha256=6iGbNbjxfCr8OZT3_neLThXdWeKRBZATwru8vU0XmRw,1688
-fxn/cli/compile.py,sha256=dd3IV1bhBCbMEo0Py6KGxn3vfv_TSDCLL2f-4qLzfiw,6037
+fxn/cli/compile.py,sha256=BSUBUiXhI7vDfztHCGgmA4Dsvco7J9i1aJXK9EQBKHc,6168
 fxn/cli/misc.py,sha256=LcJbCj_GAgtGraTRva2zHHOPpNwI6SOFntRksxwlqvM,843
 fxn/cli/predictions.py,sha256=ma7wbsKD5CFCRTU_TtJ8N0nN1fgFX2BZPGG8qm8HlNI,3182
 fxn/cli/predictors.py,sha256=bVQAuBue_Jxb79X85RTCzOerWRRT2Ny1oF5DNYAsx4M,1545
@@ -33,7 +38,7 @@ fxn/lib/macos/x86_64/Function.dylib,sha256=SO55PHLhhl8sh_Gr3IKgTHPV1-pnhLb30qbqH
 fxn/lib/windows/arm64/Function.dll,sha256=ol6LyOVtF7tq-hnPLS9RRXAkobYMSC9T_JF1kx3l2IY,411136
 fxn/lib/windows/x86_64/Function.dll,sha256=P43RXFNAdjTrDEnynGRy8CjCgY1KWzB_1Cz5W6394bg,447488
 fxn/services/__init__.py,sha256=Bif8IttwJ089mSRsd3MFdob7z2eF-MKigKu4ZQFZBCQ,190
-fxn/services/prediction.py,sha256=QCop-f7ojkGR7DI5tLJe3FPnr0BvPJ_vWhCk4kg8Fqg,10373
+fxn/services/prediction.py,sha256=2BNwzl4K7-7AXyZFE5TanIYWXJ4M4WVPWCCBbqqBC3M,10029
 fxn/services/predictor.py,sha256=Wl_7YKiD5mTpC5x2Zaq4BpatRjwRUX8Th9GIrwd38MA,791
 fxn/services/user.py,sha256=ADl5MFLsk4K0altgKHnI-i64E3g1wU3e56Noq_ciRuk,685
 fxn/types/__init__.py,sha256=MEg71rzbGgoWfgB4Yi5QvxbnovHTZRIzCUZLtWtWP1E,292
@@ -41,9 +46,9 @@ fxn/types/dtype.py,sha256=71Tuu4IydmELcBcSBbmWswhCE-7WqBSQ4VkETsFRzjA,617
 fxn/types/prediction.py,sha256=BdLTxnKiSFbz5warX8g_Z4DedNxXK3gaNjSKR2FP8tA,2051
 fxn/types/predictor.py,sha256=KRGZEuDt7WPMCyRcZvQq4y2FMocfVrLEUNJCJgfDY9Y,4000
 fxn/types/user.py,sha256=Z44TwEocyxSrfKyzcNfmAXUrpX_Ry8fJ7MffSxRn4oU,1071
-fxn-0.0.51.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
-fxn-0.0.51.dist-info/METADATA,sha256=VyONkRDW3tvdHDnCPVmglWp0G7hKI1iaMLxDfm1enU4,16136
-fxn-0.0.51.dist-info/WHEEL,sha256=ooBFpIzZCPdw3uqIQsOo4qqbA4ZRPxHnOH7peeONza0,91
-fxn-0.0.51.dist-info/entry_points.txt,sha256=O_AwD5dYaeB-YT1F9hPAPuDYCkw_W0tdNGYbc5RVR2k,45
-fxn-0.0.51.dist-info/top_level.txt,sha256=1ULIEGrnMlhId8nYAkjmRn9g3KEFuHKboq193SEKQkA,4
-fxn-0.0.51.dist-info/RECORD,,
+fxn-0.0.53.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+fxn-0.0.53.dist-info/METADATA,sha256=p4nALBoGifSFNlFX4iFqxiGEGu5nR--cbZjkuhMpSq8,16136
+fxn-0.0.53.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+fxn-0.0.53.dist-info/entry_points.txt,sha256=O_AwD5dYaeB-YT1F9hPAPuDYCkw_W0tdNGYbc5RVR2k,45
+fxn-0.0.53.dist-info/top_level.txt,sha256=1ULIEGrnMlhId8nYAkjmRn9g3KEFuHKboq193SEKQkA,4
+fxn-0.0.53.dist-info/RECORD,,
fxn-0.0.51.dist-info/WHEEL → fxn-0.0.53.dist-info/WHEEL RENAMED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.0.1)
+Generator: setuptools (80.8.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
