likelihood 2.1.3__tar.gz → 2.2.0rc1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- likelihood-2.2.0rc1/Cargo.toml +11 -0
- likelihood-2.2.0rc1/MANIFEST.in +2 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/PKG-INFO +5 -18
- likelihood-2.2.0rc1/likelihood/VERSION +1 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/graph/_nn.py +2 -139
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/graph/nn.py +1 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood.egg-info/PKG-INFO +5 -18
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood.egg-info/SOURCES.txt +8 -1
- likelihood-2.2.0rc1/likelihood.egg-info/not-zip-safe +1 -0
- likelihood-2.2.0rc1/likelihood.egg-info/top_level.txt +5 -0
- likelihood-2.2.0rc1/pyproject.toml +46 -0
- likelihood-2.2.0rc1/requirements.txt +17 -0
- likelihood-2.2.0rc1/setup.py +14 -0
- likelihood-2.2.0rc1/src/lib.rs +12 -0
- likelihood-2.1.3/likelihood.egg-info/top_level.txt +0 -1
- likelihood-2.1.3/setup.py +0 -42
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/LICENSE +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/README.md +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/__init__.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/graph/__init__.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/graph/graph.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/main.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/models/__init__.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/models/deep/__init__.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/models/deep/_autoencoders.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/models/deep/_predictor.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/models/deep/autoencoders.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/models/deep/bandit.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/models/deep/gan.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/models/deep/predictor.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/models/deep/rl.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/models/environments.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/models/hmm.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/models/regression.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/models/simulation.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/models/utils.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/pipes.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/tools/__init__.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/tools/cat_embed.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/tools/figures.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/tools/impute.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/tools/models_tools.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/tools/numeric_tools.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/tools/reports.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/tools/tools.py +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood.egg-info/dependency_links.txt +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood.egg-info/requires.txt +0 -0
- {likelihood-2.1.3 → likelihood-2.2.0rc1}/setup.cfg +0 -0
{likelihood-2.1.3 → likelihood-2.2.0rc1}/PKG-INFO

@@ -1,12 +1,11 @@
 Metadata-Version: 2.4
 Name: likelihood
-Version: 2.1.3
+Version: 2.2.0rc1
 Summary: A package that performs the maximum likelihood algorithm.
-
-
-
-
-Maintainer-email: jafetcc17@gmail.com
+Author-email: "J. A. Moreno-Guerra" <jzs.gm27@gmail.com>
+Maintainer-email: Jafet Castañeda <jafetcc17@gmail.com>
+License: MIT
+Project-URL: Homepage, https://github.com/jzsmoreno/likelihood/
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
@@ -37,19 +36,7 @@ Requires-Dist: tensorflow>=2.15.0; extra == "full"
 Requires-Dist: keras-tuner; extra == "full"
 Requires-Dist: scikit-learn; extra == "full"
 Requires-Dist: torch; extra == "full"
-Dynamic: author
-Dynamic: author-email
-Dynamic: classifier
-Dynamic: description
-Dynamic: description-content-type
-Dynamic: home-page
 Dynamic: license-file
-Dynamic: maintainer
-Dynamic: maintainer-email
-Dynamic: provides-extra
-Dynamic: requires-dist
-Dynamic: requires-python
-Dynamic: summary

likelihood-2.2.0rc1/likelihood/VERSION (NEW)

@@ -0,0 +1 @@
+2.2.0rc1

{likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood/graph/_nn.py

@@ -4,9 +4,8 @@ import os
 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
 logging.getLogger("tensorflow").setLevel(logging.ERROR)
 
-import warnings
 from multiprocessing import Pool, cpu_count
-from typing import
+from typing import List, Tuple
 
 import numpy as np
 import pandas as pd
@@ -18,143 +17,7 @@ tf.get_logger().setLevel("ERROR")
 
 from likelihood.tools import LoRALayer
 
-
-def compare_similarity_np(arr1: np.ndarray, arr2: np.ndarray, threshold: float = 0.05) -> int:
-    """Vectorized similarity comparison between two numeric/categorical arrays."""
-    arr1 = np.asarray(arr1)
-    arr2 = np.asarray(arr2)
-
-    is_numeric = np.vectorize(
-        lambda a, b: isinstance(a, (int, float)) and isinstance(b, (int, float))
-    )(arr1, arr2)
-
-    similarity = np.zeros_like(arr1, dtype=bool)
-
-    if np.any(is_numeric):
-        a_num = arr1[is_numeric].astype(float)
-        b_num = arr2[is_numeric].astype(float)
-
-        both_zero = (a_num == 0) & (b_num == 0)
-        nonzero = ~both_zero & (a_num != 0) & (b_num != 0)
-        ratio = np.zeros_like(a_num)
-        ratio[nonzero] = np.maximum(a_num[nonzero], b_num[nonzero]) / np.minimum(
-            a_num[nonzero], b_num[nonzero]
-        )
-        numeric_similar = both_zero | ((1 - threshold <= ratio) & (ratio <= 1 + threshold))
-
-        similarity[is_numeric] = numeric_similar
-
-    similarity[~is_numeric] = arr1[~is_numeric] == arr2[~is_numeric]
-
-    return np.count_nonzero(similarity)
-
-
-def compare_pair(pair, data, similarity, threshold):
-    i, j = pair
-    sim = compare_similarity_np(data[i], data[j], threshold=threshold)
-    return (i, j, 1 if sim >= similarity else 0)
-
-
-def cal_adjacency_matrix(
-    df: pd.DataFrame, exclude_subset: List[str] = [], sparse: bool = True, **kwargs
-) -> Tuple[dict, np.ndarray]:
-    """
-    Calculates the adjacency matrix for a given DataFrame using parallel processing.
-
-    Parameters
-    ----------
-    df : `pd.DataFrame`
-        The input DataFrame containing the features.
-    exclude_subset : `List[str]`, `optional`
-        A list of features to exclude from the calculation of the adjacency matrix.
-    sparse : `bool`, `optional`
-        Whether to return a sparse matrix or a dense matrix.
-    **kwargs : `dict`
-        Additional keyword arguments to pass to the `compare_similarity` function.
-
-    Returns
-    -------
-    adj_dict : `dict`
-        A dictionary containing the features.
-    adjacency_matrix : `np.ndarray`
-        The adjacency matrix.
-
-    Keyword Arguments
-    -----------------
-    similarity: `int`
-        The minimum number of features that must be the same in both arrays to be considered similar.
-    threshold : `float`
-        The threshold value used in the `compare_similarity` function. Default is 0.0
-    """
-    if len(exclude_subset) > 0:
-        columns = [col for col in df.columns if col not in exclude_subset]
-        df_ = df[columns].copy()
-    else:
-        df_ = df.copy()
-
-    assert len(df_) > 0
-
-    similarity = kwargs.get("similarity", len(df_.columns) - 1)
-    threshold = kwargs.get("threshold", 0.05)
-    assert similarity <= df_.shape[1]
-
-    data = df_.to_numpy()
-    n = len(data)
-
-    adj_dict = {i: data[i].tolist() for i in range(n)}
-
-    def pair_generator():
-        for i in range(n):
-            for j in range(i, n):
-                yield (i, j)
-
-    with Pool(cpu_count()) as pool:
-        results = pool.starmap(
-            compare_pair, ((pair, data, similarity, threshold) for pair in pair_generator())
-        )
-
-    adjacency_matrix = np.zeros((n, n), dtype=np.uint8)
-    for i, j, val in results:
-        if val:
-            adjacency_matrix[i, j] = 1
-            adjacency_matrix[j, i] = 1
-
-    if sparse:
-        num_nodes = adjacency_matrix.shape[0]
-
-        indices = np.argwhere(adjacency_matrix != 0.0)
-        indices = tf.constant(indices, dtype=tf.int64)
-        values = tf.constant(adjacency_matrix[indices[:, 0], indices[:, 1]], dtype=tf.float32)
-        adjacency_matrix = tf.sparse.SparseTensor(
-            indices=indices, values=values, dense_shape=(num_nodes, num_nodes)
-        )
-
-    return adj_dict, adjacency_matrix
-
-
-class Data:
-    def __init__(
-        self,
-        df: pd.DataFrame,
-        target: str | None = None,
-        exclude_subset: List[str] = [],
-        **kwargs,
-    ):
-        sparse = kwargs.get("sparse", True)
-        threshold = kwargs.get("threshold", 0.05)
-        _, adjacency = cal_adjacency_matrix(
-            df, exclude_subset=exclude_subset, sparse=sparse, threshold=threshold
-        )
-        if target is not None:
-            X = df.drop(columns=[target] + exclude_subset)
-        else:
-            X = df.drop(columns=exclude_subset)
-        self.columns = X.columns
-        X = X.to_numpy()
-        self.x = np.asarray(X).astype(np.float32)
-        self.adjacency = adjacency
-        if target is not None:
-            self.y = np.asarray(df[target].values).astype(np.int32)
+from .nn import Data, cal_adjacency_matrix, compare_pair, compare_similarity_np
 
 
 @tf.keras.utils.register_keras_serializable(package="Custom", name="VanillaGNNLayer")

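The helpers removed here (compare_similarity_np, compare_pair, cal_adjacency_matrix, and the Data container) are not dropped; they now live in likelihood.graph.nn and are re-imported into _nn.py, so existing call sites keep resolving. A minimal usage sketch of the relocated API, assuming the public signatures are unchanged by the move and that the `full` extras (TensorFlow) are installed; the toy DataFrame and column names are hypothetical:

```python
# Sketch only: assumes likelihood[full] (TensorFlow et al.) is installed and that
# the public signatures are unchanged by the move from _nn.py to nn.py.
import pandas as pd

from likelihood.graph.nn import Data, cal_adjacency_matrix

if __name__ == "__main__":  # cal_adjacency_matrix fans out over multiprocessing.Pool
    # Hypothetical toy frame; "label" stands in for the target column.
    df = pd.DataFrame(
        {
            "f1": [1.0, 1.02, 5.0, 5.1],
            "f2": [10.0, 10.0, 2.0, 2.0],
            "label": [0, 0, 1, 1],
        }
    )

    # Rows i and j are connected when at least `similarity` features agree
    # within the relative `threshold`.
    adj_dict, adjacency = cal_adjacency_matrix(
        df, exclude_subset=["label"], sparse=False, similarity=2, threshold=0.05
    )
    print(adjacency)

    # Data bundles the float32 feature matrix, int32 labels, and the adjacency.
    dataset = Data(df, target="label", sparse=False)
    print(dataset.x.shape, dataset.y.shape)
```
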
{likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood.egg-info/PKG-INFO

Same two hunks as {likelihood-2.1.3 → likelihood-2.2.0rc1}/PKG-INFO above: the egg-info copy of the metadata changes identically (+5 -18).

{likelihood-2.1.3 → likelihood-2.2.0rc1}/likelihood.egg-info/SOURCES.txt

@@ -1,12 +1,18 @@
+Cargo.toml
 LICENSE
+MANIFEST.in
 README.md
+pyproject.toml
+requirements.txt
 setup.py
+likelihood/VERSION
 likelihood/__init__.py
 likelihood/main.py
 likelihood/pipes.py
 likelihood.egg-info/PKG-INFO
 likelihood.egg-info/SOURCES.txt
 likelihood.egg-info/dependency_links.txt
+likelihood.egg-info/not-zip-safe
 likelihood.egg-info/requires.txt
 likelihood.egg-info/top_level.txt
 likelihood/graph/__init__.py
@@ -34,4 +40,5 @@ likelihood/tools/impute.py
 likelihood/tools/models_tools.py
 likelihood/tools/numeric_tools.py
 likelihood/tools/reports.py
-likelihood/tools/tools.py
+likelihood/tools/tools.py
+src/lib.rs

likelihood-2.2.0rc1/likelihood.egg-info/not-zip-safe (NEW)

@@ -0,0 +1 @@
+

likelihood-2.2.0rc1/pyproject.toml (NEW)

@@ -0,0 +1,46 @@
+[build-system]
+requires = [
+    "setuptools>=61",
+    "wheel",
+    "setuptools-rust>=1.8",
+]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "likelihood"
+authors = [
+    { name = "J. A. Moreno-Guerra", email = "jzs.gm27@gmail.com" }
+]
+maintainers = [
+    { name = "Jafet Castañeda", email = "jafetcc17@gmail.com" }
+]
+description = "A package that performs the maximum likelihood algorithm."
+readme = { file = "README.md", content-type = "text/markdown" }
+requires-python = ">=3.10"
+license = { text = "MIT" }
+urls = { Homepage = "https://github.com/jzsmoreno/likelihood/" }
+
+dynamic = ["version", "dependencies"]
+
+classifiers = [
+    "Programming Language :: Python :: 3",
+    "License :: OSI Approved :: MIT License",
+    "Operating System :: OS Independent",
+]
+
+[project.optional-dependencies]
+full = [
+    "networkx",
+    "pyvis",
+    "tensorflow>=2.15.0",
+    "keras-tuner",
+    "scikit-learn",
+    "torch",
+]
+
+[tool.setuptools]
+packages = { find = {} }
+
+[tool.setuptools.dynamic]
+version = { file = ["likelihood/VERSION"] }
+dependencies = { file = ["requirements.txt"] }

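Version and install requirements are now dynamic: setuptools reads them from likelihood/VERSION and requirements.txt at build time instead of from literals in setup.py. As a quick sanity check after installing a build produced from this configuration, the resolved metadata can be inspected with the standard library; a small sketch:

```python
# Sketch: inspect the metadata resolved from likelihood/VERSION and
# requirements.txt once a build of this package is installed.
from importlib.metadata import requires, version

print(version("likelihood"))               # expected to report 2.2.0rc1
print((requires("likelihood") or [])[:3])  # first few pins from requirements.txt
```
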
likelihood-2.2.0rc1/requirements.txt (NEW)

@@ -0,0 +1,17 @@
+black[jupyter]>=24.3.0
+mypy-extensions>=1.0.0
+types-openpyxl>=3.1.0.15
+pydocstyle>=6.3.0
+flake8>=6.0.0
+isort>=5.12.0
+mypy>=1.4.1
+numpy<3.0.0,>=1.26.4
+pydot==2.0.0
+matplotlib
+packaging
+graphviz
+seaborn
+pyyaml
+pandas
+corner
+tqdm

likelihood-2.2.0rc1/setup.py (NEW)

@@ -0,0 +1,14 @@
+from setuptools import find_packages, setup
+from setuptools_rust import Binding, RustExtension
+
+setup(
+    packages=find_packages(),
+    rust_extensions=[
+        RustExtension(
+            "likelihood.rust_py_integration",
+            path="Cargo.toml",
+            binding=Binding.PyO3,
+        )
+    ],
+    zip_safe=False,
+)

likelihood-2.2.0rc1/src/lib.rs (NEW)

@@ -0,0 +1,12 @@
+use pyo3::prelude::*;
+
+#[pyfunction]
+fn print_hello() {
+    println!("Hello from Rust integration!");
+}
+
+#[pymodule]
+fn rust_py_integration(_py: Python, m: &PyModule) -> PyResult<()> {
+    m.add_function(wrap_pyfunction!(print_hello, m)?)?;
+    Ok(())
+}

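Together with the RustExtension entry in the new setup.py, this PyO3 module is compiled into likelihood.rust_py_integration when the package is built with a Rust toolchain and setuptools-rust available; the sdist only ships the Rust source. A minimal call from Python, assuming such a build:

```python
# Sketch: exercise the compiled PyO3 extension declared in setup.py.
# Requires an install where setuptools-rust actually built src/lib.rs;
# a plain source checkout will not have the compiled module.
from likelihood import rust_py_integration

rust_py_integration.print_hello()  # prints "Hello from Rust integration!" via Rust's stdout
```
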
likelihood-2.1.3/likelihood.egg-info/top_level.txt (DELETED)

@@ -1 +0,0 @@
-likelihood

likelihood-2.1.3/setup.py (DELETED)

@@ -1,42 +0,0 @@
-from pathlib import Path
-
-import setuptools
-
-# Parse the requirements.txt file
-with open("requirements.txt", "r") as f:
-    install_requires = f.read().splitlines()
-
-with open("README.md", "r") as fh:
-    long_description = fh.read()
-
-about = {}
-ROOT_DIR = Path(__file__).resolve().parent
-PACKAGE_DIR = ROOT_DIR / "likelihood"
-with open(PACKAGE_DIR / "VERSION") as f:
-    _version = f.read().strip()
-about["__version__"] = _version
-
-setuptools.setup(
-    name="likelihood",
-    version=about["__version__"],
-    author="J. A. Moreno-Guerra",
-    author_email="jzs.gm27@gmail.com",
-    maintainer="Jafet Castañeda",
-    maintainer_email="jafetcc17@gmail.com",
-    description="A package that performs the maximum likelihood algorithm.",
-    py_modules=["likelihood"],
-    long_description=long_description,
-    long_description_content_type="text/markdown",
-    url="https://github.com/jzsmoreno/likelihood/",
-    packages=setuptools.find_packages(),
-    install_requires=install_requires,
-    extras_require={
-        "full": ["networkx", "pyvis", "tensorflow>=2.15.0", "keras-tuner", "scikit-learn", "torch"],
-    },
-    classifiers=[
-        "Programming Language :: Python :: 3",
-        "License :: OSI Approved :: MIT License",
-        "Operating System :: OS Independent",
-    ],
-    python_requires=">=3.10",
-)