fxn-0.0.52-py3-none-any.whl → fxn-0.0.53-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fxn/beta/__init__.py +5 -3
- fxn/beta/cli/__init__.py +6 -0
- fxn/beta/cli/llm.py +22 -0
- fxn/beta/client.py +1 -2
- fxn/beta/llm/__init__.py +5 -0
- fxn/beta/llm/server.py +5 -0
- fxn/beta/metadata.py +99 -17
- fxn/beta/services/__init__.py +7 -0
- fxn/beta/{prediction.py → services/prediction.py} +1 -1
- fxn/beta/{remote.py → services/remote.py} +4 -4
- fxn/cli/__init__.py +3 -1
- fxn/cli/compile.py +2 -0
- fxn/compile.py +3 -3
- fxn/services/prediction.py +0 -12
- fxn/version.py +1 -1
- {fxn-0.0.52.dist-info → fxn-0.0.53.dist-info}/METADATA +1 -1
- {fxn-0.0.52.dist-info → fxn-0.0.53.dist-info}/RECORD +21 -16
- {fxn-0.0.52.dist-info → fxn-0.0.53.dist-info}/WHEEL +1 -1
- {fxn-0.0.52.dist-info → fxn-0.0.53.dist-info}/entry_points.txt +0 -0
- {fxn-0.0.52.dist-info → fxn-0.0.53.dist-info}/licenses/LICENSE +0 -0
- {fxn-0.0.52.dist-info → fxn-0.0.53.dist-info}/top_level.txt +0 -0
fxn/beta/__init__.py
CHANGED

@@ -5,7 +5,9 @@
 
 from .metadata import (
     CoreMLInferenceMetadata, LiteRTInferenceMetadata, LlamaCppInferenceMetadata,
-    ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
-    QnnInferenceMetadata
+    OnnxInferenceMetadata, OnnxRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
+    QnnInferenceMetadata, QnnInferenceBackend, QnnInferenceQuantization,
+    # Deprecated
+    ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata
 )
-from .remote import RemoteAcceleration
+from .services import RemoteAcceleration
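Downstream code that still imports the old ONNX* names keeps working, since the new module re-exports them as deprecated aliases. A minimal sketch of what this means for callers (assuming fxn 0.0.53 is installed):

    from fxn.beta import OnnxInferenceMetadata, ONNXInferenceMetadata

    # The deprecated name is a plain alias, not a subclass or a shim
    assert ONNXInferenceMetadata is OnnxInferenceMetadata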
fxn/beta/cli/__init__.py
ADDED
fxn/beta/cli/llm.py
ADDED

@@ -0,0 +1,22 @@
+#
+# Function
+# Copyright © 2025 NatML Inc. All Rights Reserved.
+#
+
+from pathlib import Path
+from typer import Argument, Option, Typer
+from typing_extensions import Annotated
+
+app = Typer(no_args_is_help=True)
+
+@app.command(name="chat", help="Start a chat session.")
+def chat (
+    model: Annotated[str, Argument(help="Model to chat with.")]
+):
+    pass
+
+@app.command(name="serve", help="Start an LLM server.")
+def serve (
+    port: Annotated[int, Option(help="Port to start the server on.")] = 11435
+):
+    pass
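Both commands are stubs in this release (their bodies are `pass`), but they already register on the Typer app, so the CLI wiring can be exercised. A minimal sketch using Typer's test runner (assuming fxn 0.0.53 and typer are installed):

    from typer.testing import CliRunner
    from fxn.beta.cli.llm import app

    runner = CliRunner()
    result = runner.invoke(app, ["serve", "--port", "11435"])
    assert result.exit_code == 0  # `serve` parses its option and returns without doing anything yet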
fxn/beta/client.py
CHANGED

@@ -10,8 +10,7 @@ from typing import get_origin, Callable, Generator, Iterator, TypeVar
 from ..client import FunctionClient
 from ..services import PredictionService as EdgePredictionService
 from ..types import Acceleration
-from .prediction import PredictionService
-from .remote import RemoteAcceleration
+from .services import PredictionService, RemoteAcceleration
 
 F = TypeVar("F", bound=Callable[..., object])
 
fxn/beta/llm/__init__.py
ADDED
fxn/beta/llm/server.py
ADDED
fxn/beta/metadata.py
CHANGED

@@ -3,6 +3,7 @@
 # Copyright © 2025 NatML Inc. All Rights Reserved.
 #
 
+from os import PathLike
 from pathlib import Path
 from pydantic import BaseModel, BeforeValidator, ConfigDict, Field
 from typing import Annotated, Literal
@@ -28,56 +29,130 @@ def _validate_ort_inference_session (session: "onnxruntime.InferenceSession") ->
 class CoreMLInferenceMetadata (BaseModel):
     """
     Metadata required to lower a PyTorch model for inference on iOS, macOS, and visionOS with CoreML.
+
+    Members:
+        model (torch.nn.Module): PyTorch module to apply metadata to.
+        model_args (tuple[Tensor,...]): Positional inputs to the model.
     """
     kind: Literal["meta.inference.coreml"] = "meta.inference.coreml"
-    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
-    …
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+        description="PyTorch module to apply metadata to.",
+        exclude=True
+    )
+    model_args: list[object] = Field(
+        description="Positional inputs to the model.",
+        exclude=True
+    )
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
 
-class ONNXInferenceMetadata (BaseModel):
+class OnnxInferenceMetadata (BaseModel):
     """
     Metadata required to lower a PyTorch model for inference.
+
+    Members:
+        model (torch.nn.Module): PyTorch module to apply metadata to.
+        model_args (tuple[Tensor,...]): Positional inputs to the model.
     """
     kind: Literal["meta.inference.onnx"] = "meta.inference.onnx"
-    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
-    …
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+        description="PyTorch module to apply metadata to.",
+        exclude=True
+    )
+    model_args: list[object] = Field(
+        description="Positional inputs to the model.",
+        exclude=True
+    )
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
 
-class ONNXRuntimeInferenceSessionMetadata (BaseModel):
+class OnnxRuntimeInferenceSessionMetadata (BaseModel):
     """
     Metadata required to lower an ONNXRuntime `InferenceSession` for inference.
+
+    Members:
+        session (onnxruntime.InferenceSession): ONNXRuntime inference session to apply metadata to.
+        model_path (str | Path): ONNX model path. The model must exist at this path in the compiler sandbox.
     """
     kind: Literal["meta.inference.onnxruntime"] = "meta.inference.onnxruntime"
-    session: Annotated[object, BeforeValidator(_validate_ort_inference_session)] = Field(
-    …
+    session: Annotated[object, BeforeValidator(_validate_ort_inference_session)] = Field(
+        description="ONNXRuntime inference session to apply metadata to.",
+        exclude=True
+    )
+    model_path: str | Path = Field(
+        description="ONNX model path. The model must exist at this path in the compiler sandbox.",
+        exclude=True
+    )
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
 
 class LiteRTInferenceMetadata (BaseModel):
     """
     Metadata required to lower PyTorch model for inference with LiteRT (fka TensorFlow Lite).
+
+    Members:
+        model (torch.nn.Module): PyTorch module to apply metadata to.
+        model_args (tuple[Tensor,...]): Positional inputs to the model.
     """
     kind: Literal["meta.inference.litert"] = "meta.inference.litert"
-    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
-    …
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+        description="PyTorch module to apply metadata to.",
+        exclude=True
+    )
+    model_args: list[object] = Field(
+        description="Positional inputs to the model.",
+        exclude=True
+    )
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
 
 class OpenVINOInferenceMetadata (BaseModel):
     """
     Metadata required to lower PyTorch model for interence with Intel OpenVINO.
+
+    Members:
+        model (torch.nn.Module): PyTorch module to apply metadata to.
+        model_args (tuple[Tensor,...]): Positional inputs to the model.
     """
     kind: Literal["meta.inference.openvino"] = "meta.inference.openvino"
-    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
-    …
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+        description="PyTorch module to apply metadata to.",
+        exclude=True
+    )
+    model_args: list[object] = Field(
+        description="Positional inputs to the model.",
+        exclude=True
+    )
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
 
+QnnInferenceBackend = Literal["cpu", "gpu"] # `htp` coming soon
+QnnInferenceQuantization = Literal["w8a8", "w8a16", "w4a8", "w4a16"]
+
 class QnnInferenceMetadata (BaseModel):
     """
     Metadata required to lower a PyTorch model for inference on Qualcomm accelerators with QNN SDK.
+
+    Members:
+        model (torch.nn.Module): PyTorch module to apply metadata to.
+        model_args (tuple[Tensor,...]): Positional inputs to the model.
+        backend (QnnInferenceBackend): QNN inference backend. Defaults to `cpu`.
+        quantization (QnnInferenceQuantization): QNN model quantization mode. This MUST only be specified when backend is `htp`.
     """
     kind: Literal["meta.inference.qnn"] = "meta.inference.qnn"
-    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
-    …
-    …
+    model: Annotated[object, BeforeValidator(_validate_torch_module)] = Field(
+        description="PyTorch module to apply metadata to.",
+        exclude=True
+    )
+    model_args: list[object] = Field(
+        description="Positional inputs to the model.",
+        exclude=True
+    )
+    backend: QnnInferenceBackend = Field(
+        default="cpu",
+        description="QNN backend to execute the model.",
+        exclude=True
+    )
+    quantization: QnnInferenceQuantization | None = Field(
+        default=None,
+        description="QNN model quantization mode. This MUST only be specified when backend is `htp`.",
+        exclude=True
+    )
     model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
 
 class LlamaCppInferenceMetadata (BaseModel): # INCOMPLETE
@@ -85,5 +160,12 @@ class LlamaCppInferenceMetadata (BaseModel): # INCOMPLETE
     Metadata required to lower a GGUF model for LLM inference.
     """
     kind: Literal["meta.inference.gguf"] = "meta.inference.gguf"
-    model_path: Path = Field(
-    …
+    model_path: Path = Field(
+        description="GGUF model path. The model must exist at this path in the compiler sandbox.",
+        exclude=True
+    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True)
+
+# DEPRECATED
+ONNXInferenceMetadata = OnnxInferenceMetadata
+ONNXRuntimeInferenceSessionMetadata = OnnxRuntimeInferenceSessionMetadata
fxn/beta/{remote.py → services/remote.py}
RENAMED

@@ -15,10 +15,10 @@ from requests import get, put
 from typing import Literal
 from urllib.request import urlopen
 
-from ..c import Configuration
-from ..client import FunctionClient
-from ..services.prediction import Value
-from ..types import Dtype, Prediction
+from ...c import Configuration
+from ...client import FunctionClient
+from ...services.prediction import Value
+from ...types import Dtype, Prediction
 
 RemoteAcceleration = Literal["auto", "cpu", "a40", "a100"]
 
fxn/cli/__init__.py
CHANGED

@@ -14,6 +14,7 @@ from .misc import cli_options
 from .predictions import create_prediction
 from .predictors import archive_predictor, delete_predictor, retrieve_predictor
 from .sources import retrieve_source
+from ..beta.cli import llm_app
 
 # Define CLI
 typer.main.console_stderr = TracebackMarkupConsole()
@@ -30,6 +31,7 @@ app.callback()(cli_options)
 
 # Add subcommands
 app.add_typer(auth_app, name="auth", help="Login, logout, and check your authentication status.")
+app.add_typer(llm_app, name="llm", hidden=True, help="Work with large language models (LLMs).")
 
 # Add top-level commands
 app.command(
@@ -44,7 +46,7 @@ app.command(
 app.command(name="retrieve", help="Retrieve a predictor.")(retrieve_predictor)
 app.command(name="archive", help="Archive a predictor.")(archive_predictor)
 app.command(name="delete", help="Delete a predictor.")(delete_predictor)
-app.command(name="source", help="Retrieve the native …
+app.command(name="source", help="Retrieve the generated native code for a given predictor.")(retrieve_source)
 
 # Run
 if __name__ == "__main__":
fxn/cli/compile.py
CHANGED

@@ -86,6 +86,8 @@ def _load_predictor_func (path: str) -> Callable[...,object]:
     if "" not in sys.path:
         sys.path.insert(0, "")
     path: Path = Path(path).resolve()
+    if not path.exists():
+        raise ValueError(f"Cannot compile predictor because no Python module exists at the given path.")
     sys.path.insert(0, str(path.parent))
     name = getmodulename(path)
     spec = spec_from_file_location(name, path)
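The new guard turns a confusing downstream import failure into an explicit error. A quick sketch of the failure path (`_load_predictor_func` is an internal helper, and the path below is hypothetical):

    from fxn.cli.compile import _load_predictor_func

    try:
        _load_predictor_func("no_such_predictor.py")  # hypothetical missing file
    except ValueError as error:
        print(error)  # Cannot compile predictor because no Python module exists at the given path.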
fxn/compile.py
CHANGED

@@ -13,7 +13,7 @@ from typing import Any, Callable, Literal, ParamSpec, TypeVar, cast
 
 from .beta import (
     CoreMLInferenceMetadata, LiteRTInferenceMetadata, LlamaCppInferenceMetadata,
-    ONNXInferenceMetadata, ONNXRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
+    OnnxInferenceMetadata, OnnxRuntimeInferenceSessionMetadata, OpenVINOInferenceMetadata,
     QnnInferenceMetadata
 )
 from .sandbox import Sandbox
@@ -33,8 +33,8 @@ CompileMetadata = (
     CoreMLInferenceMetadata |
     LiteRTInferenceMetadata |
     LlamaCppInferenceMetadata |
-    ONNXInferenceMetadata |
-    ONNXRuntimeInferenceSessionMetadata |
+    OnnxInferenceMetadata |
+    OnnxRuntimeInferenceSessionMetadata |
     OpenVINOInferenceMetadata |
     QnnInferenceMetadata
 )
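With the rename, the Onnx* classes are what the `CompileMetadata` union accepts. A minimal sketch of passing one to the `@compile` decorator; this assumes `@compile` takes a `metadata` list alongside `tag` and `description`, which this diff does not show:

    import torch
    from fxn.beta import OnnxInferenceMetadata
    from fxn.compile import compile

    module = torch.nn.Linear(4, 2)  # placeholder PyTorch module

    @compile(
        tag="@example/linear",          # hypothetical predictor tag
        description="Toy linear predictor.",
        metadata=[                      # assumed parameter name; only the CompileMetadata union is shown above
            OnnxInferenceMetadata(model=module, model_args=[torch.randn(1, 4)])
        ]
    )
    def predict (x):
        ...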
fxn/services/prediction.py
CHANGED

@@ -43,18 +43,6 @@ class PredictionService:
         self.__cache_dir = self.__class__.__get_home_dir() / ".fxn" / "cache"
         self.__cache_dir.mkdir(parents=True, exist_ok=True)
 
-    def ready (self, tag: str, **kwargs) -> bool:
-        """
-        Check whether a predictor has been preloaded and is ready to make predictions.
-
-        Parameters:
-            tag (str): Predictor tag.
-
-        Returns:
-            bool: Whether the predictor is ready to make predictions.
-        """
-        return tag in self.__cache
-
     def create (
         self,
         tag: str,
fxn/version.py
CHANGED

{fxn-0.0.52.dist-info → fxn-0.0.53.dist-info}/RECORD
RENAMED

@@ -1,15 +1,20 @@
 fxn/__init__.py,sha256=gnJK7iOmMVWFhluW9bOvTNxJbpT-GwzDJTMmjA_XxOE,284
 fxn/client.py,sha256=Deje8eiS1VOHX85tQnV34viv2CPVx2ljwHSbyVB5Z1o,3790
-fxn/compile.py,sha256=…
+fxn/compile.py,sha256=tKLRdbFPf0c3Q7UMtKa1Wbpf4Vx1XxbMzh3ltfVb_eo,4371
 fxn/function.py,sha256=XeEuALkbVhkvwEBUfP0A2fu3tdimwHemoR17oomhzc8,1407
 fxn/logging.py,sha256=MsTSf0GZxrHNDwVAXDOh8_zRUg9hkeZ8DfhFUJs7D8A,7250
 fxn/sandbox.py,sha256=50yY2GDdkAFl-6pXTleaD1LXYM6-pJ3C1epKsr0xdrM,7313
-fxn/version.py,sha256=…
-fxn/beta/__init__.py,sha256=…
-fxn/beta/client.py,sha256=…
-fxn/beta/metadata.py,sha256=…
-fxn/beta/…
-fxn/beta/…
+fxn/version.py,sha256=n-WFHj2EhODMEevfcqzzFWZYnuAaaKG2T-dxo23t23U,95
+fxn/beta/__init__.py,sha256=h5PwE5PtYu9BgdysuAG51KMJ2N_clwixufXgTzC0dTg,464
+fxn/beta/client.py,sha256=s0BpkQM4V_816pyzB8sbo-QQg0S7tY0APTpYACWsxQM,2590
+fxn/beta/metadata.py,sha256=Z3bJwVd-8GeaAly1LgZE3Yej7y7sQUu_IY2qY2ISYFk,6935
+fxn/beta/cli/__init__.py,sha256=_X_lreE4q_CY8AzRmcFzRI1OIV8x1xrEyOqAV7fsQlk,104
+fxn/beta/cli/llm.py,sha256=loL87unr1o_TfsaBTQOb3d7CEpm1Qcf5WJa-qv51iXE,517
+fxn/beta/llm/__init__.py,sha256=bJB5i1eY8zXnixVc34iubxnEXTIoz1UtnoGI5hXDg18,73
+fxn/beta/llm/server.py,sha256=bJB5i1eY8zXnixVc34iubxnEXTIoz1UtnoGI5hXDg18,73
+fxn/beta/services/__init__.py,sha256=6XNWEcXXbFX2O_P-rpR0xMuAp9gN7Q0xuxzp7BGt8Xc,153
+fxn/beta/services/prediction.py,sha256=VbZIY292rIP6CHzR4GlL4DvFkAymM4qEQW2_ii2R1-k,357
+fxn/beta/services/remote.py,sha256=jQcYxOUFp3xnnLljPK5TYNEWpmDCwv0vjM0oQkK8h54,7544
 fxn/c/__init__.py,sha256=NMIduqO_MYtI9jVCu6ZxvbBtYQXoQyNEWblNy3m2UPY,313
 fxn/c/configuration.py,sha256=56_-NNT4yoHDNfvB6jJNYF2eKJYMRLVrv3mIg7g6qaE,5597
 fxn/c/fxnc.py,sha256=YrvwOlzPmTlSDuz2zmKZfws2WK5BY4YZ62edoplcMJU,1381
@@ -18,9 +23,9 @@ fxn/c/prediction.py,sha256=-d-5yreFAaRS-nDHzhfabRNtgYcmJGiY_N2dt09gk84,2689
 fxn/c/predictor.py,sha256=48poLj1AthzCgU9n6Wv9gL8o4gFucIlOnBO2wdor6r0,1925
 fxn/c/stream.py,sha256=Y1Xv1Bt3_qlnWg9rCn7NWESpouF1eKMzDiQjhZWbXTg,1105
 fxn/c/value.py,sha256=h5n91nm8C3YvEEFORfJBUdncZ29DFIdUKGWQ_KpLsWc,7420
-fxn/cli/__init__.py,sha256=…
+fxn/cli/__init__.py,sha256=gMn7pj8287M8KhB0cStQOcgo5fGGqKsR4i3nKAJQGow,1671
 fxn/cli/auth.py,sha256=6iGbNbjxfCr8OZT3_neLThXdWeKRBZATwru8vU0XmRw,1688
-fxn/cli/compile.py,sha256=…
+fxn/cli/compile.py,sha256=BSUBUiXhI7vDfztHCGgmA4Dsvco7J9i1aJXK9EQBKHc,6168
 fxn/cli/misc.py,sha256=LcJbCj_GAgtGraTRva2zHHOPpNwI6SOFntRksxwlqvM,843
 fxn/cli/predictions.py,sha256=ma7wbsKD5CFCRTU_TtJ8N0nN1fgFX2BZPGG8qm8HlNI,3182
 fxn/cli/predictors.py,sha256=bVQAuBue_Jxb79X85RTCzOerWRRT2Ny1oF5DNYAsx4M,1545
@@ -33,7 +38,7 @@ fxn/lib/macos/x86_64/Function.dylib,sha256=SO55PHLhhl8sh_Gr3IKgTHPV1-pnhLb30qbqH
 fxn/lib/windows/arm64/Function.dll,sha256=ol6LyOVtF7tq-hnPLS9RRXAkobYMSC9T_JF1kx3l2IY,411136
 fxn/lib/windows/x86_64/Function.dll,sha256=P43RXFNAdjTrDEnynGRy8CjCgY1KWzB_1Cz5W6394bg,447488
 fxn/services/__init__.py,sha256=Bif8IttwJ089mSRsd3MFdob7z2eF-MKigKu4ZQFZBCQ,190
-fxn/services/prediction.py,sha256=…
+fxn/services/prediction.py,sha256=2BNwzl4K7-7AXyZFE5TanIYWXJ4M4WVPWCCBbqqBC3M,10029
 fxn/services/predictor.py,sha256=Wl_7YKiD5mTpC5x2Zaq4BpatRjwRUX8Th9GIrwd38MA,791
 fxn/services/user.py,sha256=ADl5MFLsk4K0altgKHnI-i64E3g1wU3e56Noq_ciRuk,685
 fxn/types/__init__.py,sha256=MEg71rzbGgoWfgB4Yi5QvxbnovHTZRIzCUZLtWtWP1E,292
@@ -41,9 +46,9 @@ fxn/types/dtype.py,sha256=71Tuu4IydmELcBcSBbmWswhCE-7WqBSQ4VkETsFRzjA,617
 fxn/types/prediction.py,sha256=BdLTxnKiSFbz5warX8g_Z4DedNxXK3gaNjSKR2FP8tA,2051
 fxn/types/predictor.py,sha256=KRGZEuDt7WPMCyRcZvQq4y2FMocfVrLEUNJCJgfDY9Y,4000
 fxn/types/user.py,sha256=Z44TwEocyxSrfKyzcNfmAXUrpX_Ry8fJ7MffSxRn4oU,1071
-fxn-0.0.…
-fxn-0.0.…
-fxn-0.0.…
-fxn-0.0.…
-fxn-0.0.…
-fxn-0.0.…
+fxn-0.0.53.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+fxn-0.0.53.dist-info/METADATA,sha256=p4nALBoGifSFNlFX4iFqxiGEGu5nR--cbZjkuhMpSq8,16136
+fxn-0.0.53.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+fxn-0.0.53.dist-info/entry_points.txt,sha256=O_AwD5dYaeB-YT1F9hPAPuDYCkw_W0tdNGYbc5RVR2k,45
+fxn-0.0.53.dist-info/top_level.txt,sha256=1ULIEGrnMlhId8nYAkjmRn9g3KEFuHKboq193SEKQkA,4
+fxn-0.0.53.dist-info/RECORD,,
File without changes
|
File without changes
|
File without changes
|