libmultilabel 0.7.3__tar.gz → 0.8.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/PKG-INFO +10 -7
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/README.md +1 -1
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/linear/linear.py +18 -4
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/linear/tree.py +123 -39
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel.egg-info/PKG-INFO +10 -7
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel.egg-info/requires.txt +5 -3
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/setup.cfg +8 -6
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/LICENSE +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/__init__.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/common_utils.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/linear/__init__.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/linear/data_utils.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/linear/metrics.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/linear/preprocessor.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/linear/utils.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/logging.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/nn/__init__.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/nn/attentionxml.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/nn/data_utils.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/nn/metrics.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/nn/model.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/nn/networks/__init__.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/nn/networks/bert.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/nn/networks/bert_attention.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/nn/networks/caml.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/nn/networks/kim_cnn.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/nn/networks/labelwise_attention_networks.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/nn/networks/modules.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/nn/networks/xml_cnn.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel/nn/nn_utils.py +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel.egg-info/SOURCES.txt +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel.egg-info/dependency_links.txt +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/libmultilabel.egg-info/top_level.txt +0 -0
- {libmultilabel-0.7.3 → libmultilabel-0.8.0}/pyproject.toml +0 -0
--- libmultilabel-0.7.3/PKG-INFO
+++ libmultilabel-0.8.0/PKG-INFO
@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: libmultilabel
-Version: 0.7.3
+Version: 0.8.0
 Summary: A library for multi-class and multi-label classification
 Home-page: https://github.com/ASUS-AICS/LibMultiLabel
 Author: LibMultiLabel Team
@@ -16,22 +16,25 @@ Classifier: Intended Audience :: Science/Research
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.
-Requires-Python: >=3.
+Classifier: Programming Language :: Python :: 3.10
+Requires-Python: >=3.10
 License-File: LICENSE
 Requires-Dist: liblinear-multicore>=2.49.0
 Requires-Dist: numba
 Requires-Dist: pandas>1.3.0
 Requires-Dist: PyYAML
 Requires-Dist: scikit-learn
-Requires-Dist: scipy
+Requires-Dist: scipy
 Requires-Dist: tqdm
+Requires-Dist: psutil
+Requires-Dist: sparsekmeans
 Provides-Extra: nn
-Requires-Dist: lightning
+Requires-Dist: lightning; extra == "nn"
 Requires-Dist: nltk; extra == "nn"
 Requires-Dist: torch<=2.3; extra == "nn"
 Requires-Dist: torchmetrics==0.10.3; extra == "nn"
 Requires-Dist: torchtext; extra == "nn"
-Requires-Dist: transformers; extra == "nn"
+Requires-Dist: transformers<=4.51.3; extra == "nn"
+Dynamic: license-file
 
 See documentation here: https://www.csie.ntu.edu.tw/~cjlin/libmultilabel
--- libmultilabel-0.7.3/README.md
+++ libmultilabel-0.8.0/README.md
@@ -9,7 +9,7 @@ LibMultiLabel is a library for binary, multi-class, and multi-label classificati
 This is an on-going development so many improvements are still being made. Comments are very welcome.
 
 ## Environments
-- Python: 3.
+- Python: 3.10+
 - CUDA: 11.8, 12.1 (if training neural networks by GPU)
 - Pytorch: 2.0.1+
 
--- libmultilabel-0.7.3/libmultilabel/linear/linear.py
+++ libmultilabel-0.8.0/libmultilabel/linear/linear.py
@@ -27,7 +27,7 @@ class FlatModel:
     def __init__(
         self,
         name: str,
-        weights: np.matrix,
+        weights: np.matrix | sparse.csr_matrix,
         bias: float,
         thresholds: float | np.ndarray,
         multiclass: bool,
@@ -69,7 +69,21 @@ class FlatModel:
             "csr",
         )
 
-        return (x * self.weights)
+        return self._to_dense_array(x * self.weights) + self.thresholds
+
+    def _to_dense_array(self, matrix: np.matrix | sparse.csr_matrix) -> np.ndarray:
+        """Convert a numpy or scipy matrix to a dense ndarray.
+
+        Args:
+            matrix (np.matrix | sparse.csr_matrix): A numpy or scipy sparse matrix.
+
+        Returns:
+            np.ndarray: A dense ndarray of `matrix`.
+        """
+        if sparse.issparse(matrix):
+            return matrix.toarray()
+        elif isinstance(matrix, np.matrix):
+            return np.asarray(matrix)
 
 
 def train_1vsrest(
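The new `_to_dense_array` helper exists because `x * self.weights` yields a different type depending on how the weights are stored: an `np.matrix` when the weights are dense, and a scipy sparse matrix when they are a `sparse.csr_matrix`. A minimal sketch of that behavior, with made-up shapes, using only numpy and scipy:

```python
import numpy as np
import scipy.sparse as sparse

x = sparse.random(4, 3, density=0.5, format="csr")  # 4 instances, 3 features
dense_w = np.asmatrix(np.ones((3, 2)))               # np.matrix weights, 2 labels
sparse_w = sparse.csr_matrix(np.ones((3, 2)))        # csr_matrix weights

print(type(x * dense_w))   # numpy.matrix
print(type(x * sparse_w))  # a scipy sparse matrix

def to_dense_array(m):
    # Same idea as the new helper: collapse both cases to np.ndarray,
    # so that adding thresholds broadcasts identically for either weight type.
    return m.toarray() if sparse.issparse(m) else np.asarray(m)

print(to_dense_array(x * dense_w).shape)   # (4, 2)
print(to_dense_array(x * sparse_w).shape)  # (4, 2)
```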
@@ -458,7 +472,7 @@ def _cost_sensitive_one_label(y: np.ndarray, x: sparse.csr_matrix, options: str)
 
     param_space = [1, 1.33, 1.8, 2.5, 3.67, 6, 13]
 
-    bestScore = -np.
+    bestScore = -np.inf
     for a in param_space:
         cv_options = f"{options} -w1 {a}"
         pred = _cross_validate(y, x, cv_options, perm)
@@ -532,7 +546,7 @@ def train_cost_sensitive_micro(
     l = y.shape[0]
     perm = np.random.permutation(l)
     param_space = [1, 1.33, 1.8, 2.5, 3.67, 6, 13]
-    bestScore = -np.
+    bestScore = -np.inf
 
     if verbose:
         logging.info(f"Training cost-sensitive model for Micro-F1 on {num_class} labels")
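Both hunks initialize the running best with `-np.inf`, which loses every comparison so the first candidate always replaces it; `np.inf` is also the only spelling NumPy 2.0 keeps, the `np.Inf` and `np.Infinity` aliases having been removed. A small sketch of the sweep pattern, with a hypothetical stand-in for the cross-validated score:

```python
import numpy as np

def fake_cv_score(a: float) -> float:
    # Hypothetical stand-in; the real code cross-validates liblinear
    # with options such as f"{options} -w1 {a}".
    return -((a - 2.5) ** 2)

param_space = [1, 1.33, 1.8, 2.5, 3.67, 6, 13]
best_score, best_a = -np.inf, None  # -np.inf guarantees the first score wins
for a in param_space:
    score = fake_cv_score(a)
    if score > best_score:
        best_score, best_a = score, a

print(best_a, best_score)  # 2.5 -0.0
```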
--- libmultilabel-0.7.3/libmultilabel/linear/tree.py
+++ libmultilabel-0.8.0/libmultilabel/linear/tree.py
@@ -4,7 +4,7 @@ from typing import Callable
 
 import numpy as np
 import scipy.sparse as sparse
-import
+from sparsekmeans import LloydKmeans, ElkanKmeans
 import sklearn.preprocessing
 from tqdm import tqdm
 import psutil
@@ -46,13 +46,14 @@ class TreeModel:
         self,
         root: Node,
         flat_model: linear.FlatModel,
-        weight_map: np.ndarray,
+        node_ptr: np.ndarray,
     ):
         self.name = "tree"
         self.root = root
         self.flat_model = flat_model
-        self.weight_map = weight_map
+        self.node_ptr = node_ptr
         self.multiclass = False
+        self._model_separated = False  # Indicates whether the model has been separated for pruning tree.
 
     def predict_values(
         self,
@@ -68,10 +69,93 @@ class TreeModel:
         Returns:
             np.ndarray: A matrix with dimension number of instances * number of classes.
         """
-
-
+        if beam_width >= len(self.root.children):
+            # Beam_width is sufficiently large; pruning not applied.
+            # Calculates decision values for all nodes.
+            all_preds = linear.predict_values(self.flat_model, x)  # number of instances * (number of labels + total number of metalabels)
+        else:
+            # Beam_width is small; pruning applied to reduce computation.
+            if not self._model_separated:
+                self._separate_model_for_pruning_tree()
+                self._model_separated = True
+            all_preds = self._prune_tree_and_predict_values(x, beam_width)  # number of instances * (number of labels + total number of metalabels)
         return np.vstack([self._beam_search(all_preds[i], beam_width) for i in range(all_preds.shape[0])])
 
+    def _separate_model_for_pruning_tree(self):
+        """
+        This function separates the weights for the root node and its children into (K+1) FlatModel
+        for efficient beam search traversal in Python.
+        """
+        tree_flat_model_params = {
+            'bias': self.root.model.bias,
+            'thresholds': 0,
+            'multiclass': False
+        }
+        slice = np.s_[:, self.node_ptr[self.root.index] : self.node_ptr[self.root.index + 1]]
+        self.root_model = linear.FlatModel(
+            name="root-flattened-tree",
+            weights=self.flat_model.weights[slice].tocsr(),
+            **tree_flat_model_params
+        )
+
+        self.subtree_models = []
+        for i in range(len(self.root.children)):
+            subtree_weights_start = self.node_ptr[self.root.children[i].index]
+            subtree_weights_end = self.node_ptr[self.root.children[i+1].index] if i+1 < len(self.root.children) else self.node_ptr[-1]
+            slice = np.s_[:, subtree_weights_start:subtree_weights_end]
+            subtree_flatmodel = linear.FlatModel(
+                name="subtree-flattened-tree",
+                weights=self.flat_model.weights[slice].tocsr(),
+                **tree_flat_model_params
+            )
+            self.subtree_models.append(subtree_flatmodel)
+
+    def _prune_tree_and_predict_values(self, x: sparse.csr_matrix, beam_width: int) -> np.ndarray:
+        """Calculates the selective decision values associated with instances x by evaluating only the most relevant subtrees.
+
+        Only subtrees corresponding to the top beam_width candidates from the root are evaluated,
+        skipping the rest to avoid unnecessary computation.
+
+        Args:
+            x (sparse.csr_matrix): A matrix with dimension number of instances * number of features.
+            beam_width (int): Number of top candidate branches considered for prediction.
+
+        Returns:
+            np.ndarray: A matrix with dimension number of instances * (number of labels + total number of metalabels).
+        """
+        # Initialize space for all predictions with negative infinity
+        num_instances, num_labels = x.shape[0], self.node_ptr[-1]
+        all_preds = np.full((num_instances, num_labels), -np.inf)
+
+        # Calculate root decision values and scores
+        root_preds = linear.predict_values(self.root_model, x)
+        children_scores = 0.0 - np.square(np.maximum(0, 1 - root_preds))
+
+        slice = np.s_[:, self.node_ptr[self.root.index] : self.node_ptr[self.root.index + 1]]
+        all_preds[slice] = root_preds
+
+        # Select indices of the top beam_width subtrees for each instance
+        top_beam_width_indices = np.argsort(-children_scores, axis=1, kind="stable")[:, :beam_width]
+
+        # Build a mask where mask[i, j] is True if the j-th subtree is among the top beam_width subtrees for the i-th instance
+        mask = np.zeros_like(children_scores, dtype=np.bool_)
+        np.put_along_axis(mask, top_beam_width_indices, True, axis=1)
+
+        # Calculate predictions for each subtree with its corresponding instances
+        for subtree_idx in range(len(self.root.children)):
+            subtree_model = self.subtree_models[subtree_idx]
+            instances_mask = mask[:, subtree_idx]
+            reduced_instances = x[np.s_[instances_mask], :]
+
+            # Locate the position of the subtree root in the weight mapping of all nodes
+            subtree_weights_start = self.node_ptr[self.root.children[subtree_idx].index]
+            subtree_weights_end = subtree_weights_start + subtree_model.weights.shape[1]
+
+            slice = np.s_[instances_mask, subtree_weights_start:subtree_weights_end]
+            all_preds[slice] = linear.predict_values(subtree_model, reduced_instances)
+
+        return all_preds
+
     def _beam_search(self, instance_preds: np.ndarray, beam_width: int) -> np.ndarray:
         """Predict with beam search using cached probability estimates for a single instance.
 
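The core of the pruning path is plain NumPy: rank the root's children per instance, then build a boolean routing mask with `np.put_along_axis` so each subtree model only scores the instances routed to it. A toy run with made-up scores:

```python
import numpy as np

children_scores = np.array([
    [0.9, 0.1, 0.5, 0.3, 0.7],   # instance 0
    [0.2, 0.8, 0.6, 0.4, 0.1],   # instance 1
    [0.3, 0.3, 0.9, 0.2, 0.8],   # instance 2
])
beam_width = 2

# Indices of the top-beam_width children per instance (sort negated scores).
top = np.argsort(-children_scores, axis=1, kind="stable")[:, :beam_width]

# mask[i, j] is True iff subtree j must be evaluated for instance i.
mask = np.zeros_like(children_scores, dtype=np.bool_)
np.put_along_axis(mask, top, True, axis=1)
print(mask.astype(int))
# [[1 0 0 0 1]
#  [0 1 1 0 0]
#  [0 0 1 0 1]]

# Each subtree then scores only its masked rows; everything else keeps
# the -np.inf placeholder and is never preferred by beam search.
for j in range(children_scores.shape[1]):
    print(j, np.flatnonzero(mask[:, j]))  # instances routed to subtree j
```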
@@ -93,7 +177,7 @@ class TreeModel:
             if node.isLeaf():
                 next_level.append((node, score))
                 continue
-            slice = np.s_[self.weight_map[node.index] : self.weight_map[node.index + 1]]
+            slice = np.s_[self.node_ptr[node.index] : self.node_ptr[node.index + 1]]
             pred = instance_preds[slice]
             children_score = score - np.square(np.maximum(0, 1 - pred))
             next_level.extend(zip(node.children, children_score.tolist()))
@@ -102,9 +186,9 @@ class TreeModel:
             next_level = []
 
         num_labels = len(self.root.label_map)
-        scores = np.
+        scores = np.zeros(num_labels)
         for node, score in cur_level:
-            slice = np.s_[self.weight_map[node.index] : self.weight_map[node.index + 1]]
+            slice = np.s_[self.node_ptr[node.index] : self.node_ptr[node.index + 1]]
             pred = instance_preds[slice]
             scores[node.label_map] = np.exp(score - np.square(np.maximum(0, 1 - pred)))
         return scores
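Both loops score a child as the parent's accumulated score minus a squared hinge penalty on the child's decision value, and the leaf pass exponentiates the total. A worked toy example of those two steps:

```python
import numpy as np

parent_score = 0.0
pred = np.array([2.0, 0.5, -1.0])  # decision values of one node's 3 children

# Squared hinge penalty: free when pred >= 1, quadratic below it.
children_score = parent_score - np.square(np.maximum(0, 1 - pred))
print(children_score)  # [ 0.   -0.25 -4.  ]

# At the leaves the accumulated score is exponentiated into a positive
# per-label value; the ranking induced by the penalties is preserved.
print(np.exp(children_score))  # [1.         0.77880078 0.01831564]
```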
@@ -130,7 +214,7 @@ def train_tree(
         verbose (bool, optional): Output extra progress information. Defaults to True.
 
     Returns:
-        A model which can be used in predict_values.
+        TreeModel: A model which can be used in predict_values.
     """
     label_representation = (y.T * x).tocsr()
     label_representation = sklearn.preprocessing.normalize(label_representation, norm="l2", axis=1)
@@ -173,8 +257,8 @@ def train_tree(
     root.dfs(visit)
     pbar.close()
 
-    flat_model, weight_map = _flatten_model(root)
-    return TreeModel(root, flat_model, weight_map)
+    flat_model, node_ptr = _flatten_model(root)
+    return TreeModel(root, flat_model, node_ptr)
 
 
 def _build_tree(label_representation: sparse.csr_matrix, label_map: np.ndarray, d: int, K: int, dmax: int) -> Node:
@@ -188,30 +272,31 @@ def _build_tree(label_representation: sparse.csr_matrix, label_map: np.ndarray,
         dmax (int): Maximum depth of the tree.
 
     Returns:
-        Node:
+        Node: Root of the (sub)tree built from label_representation.
     """
-
-
-
-
-
-
-
-
-            max_iter=300,
-            tol=0.0001,
-            algorithm="elkan",
+    children = []
+    if d < dmax and label_representation.shape[0] > K:
+        if label_representation.shape[0] > 10000:
+            kmeans_algo = ElkanKmeans
+        else:
+            kmeans_algo = LloydKmeans
+
+        kmeans = kmeans_algo(
+            n_clusters=K, max_iter=300, tol=0.0001, random_state=np.random.randint(2**31 - 1), verbose=True
         )
-        .fit(label_representation)
-        .labels_
-    )
+        metalabels = kmeans.fit(label_representation)
 
-
-
-
-
-
-
+        unique_labels = np.unique(metalabels)
+        if len(unique_labels) == K:
+            create_child_node = lambda i: _build_tree(
+                label_representation[metalabels == i], label_map[metalabels == i], d + 1, K, dmax
+            )
+        else:
+            create_child_node = lambda i: Node(label_map=label_map[metalabels == i], children=[])
+
+        for i in range(K):
+            child = create_child_node(i)
+            children.append(child)
 
     return Node(label_map=label_map, children=children)
 
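The rewritten `_build_tree` swaps scikit-learn's KMeans for the new `sparsekmeans` dependency, picking `ElkanKmeans` above 10000 labels and `LloydKmeans` otherwise, and degrades to plain leaf children when clustering yields fewer than K distinct groups. A sketch of the clustering step on synthetic data, assuming `sparsekmeans` is installed and that `fit` returns per-row assignments, as the hunk above uses it:

```python
import numpy as np
import scipy.sparse as sparse
from sparsekmeans import ElkanKmeans, LloydKmeans

rng = np.random.default_rng(0)
# Synthetic stand-in for the label representation train_tree builds from (y.T * x).
label_representation = sparse.csr_matrix(rng.random((12, 6)))

K = 4
# Same selection rule as the new code: Elkan only for very large label sets.
algo = ElkanKmeans if label_representation.shape[0] > 10000 else LloydKmeans
kmeans = algo(n_clusters=K, max_iter=300, tol=0.0001, random_state=42, verbose=True)
metalabels = kmeans.fit(label_representation)  # one cluster id per label

for i in range(K):
    print(i, np.flatnonzero(metalabels == i))  # labels routed to child i
```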
@@ -261,11 +346,10 @@ def _flatten_model(root: Node) -> tuple[linear.FlatModel, np.ndarray]:
    """Flatten tree weight matrices into a single weight matrix. The flattened weight
    matrix is used to predict all possible values, which is cached for beam search.
    This pessimizes complexity but is faster in practice.
-    Consecutive values of the returned
-
-
-
-        weight_map[node.index+1]]
+    Consecutive values of the returned array denote the start and end indices of each node in the tree.
+    To extract a node's classifiers:
+        slice = np.s_[node_ptr[node.index]:
+                      node_ptr[node.index+1]]
     node.model.weights == flat_model.weights[:, slice]
 
     Args:
@@ -296,6 +380,6 @@ def _flatten_model(root: Node) -> tuple[linear.FlatModel, np.ndarray]:
     )
 
     # w.shape[1] is the number of labels/metalabels of each node
-    weight_map = np.cumsum([0] + list(map(lambda w: w.shape[1], weights)))
+    node_ptr = np.cumsum([0] + list(map(lambda w: w.shape[1], weights)))
 
-    return model, weight_map
+    return model, node_ptr
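`node_ptr` is an exclusive prefix sum over each node's weight-matrix width, in the style of a CSR `indptr` array, so consecutive entries bracket a node's columns in the flattened matrix. A toy illustration:

```python
import numpy as np

widths = [3, 2, 4]  # w.shape[1] for each node, in node-index order
node_ptr = np.cumsum([0] + widths)
print(node_ptr)  # [0 3 5 9]

# Columns of the flattened weight matrix that belong to node 1:
node_index = 1
s = np.s_[node_ptr[node_index] : node_ptr[node_index + 1]]
print(s)  # slice(3, 5, None) -> columns 3 and 4
```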
--- libmultilabel-0.7.3/libmultilabel.egg-info/PKG-INFO
+++ libmultilabel-0.8.0/libmultilabel.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: libmultilabel
-Version: 0.7.3
+Version: 0.8.0
 Summary: A library for multi-class and multi-label classification
 Home-page: https://github.com/ASUS-AICS/LibMultiLabel
 Author: LibMultiLabel Team
@@ -16,22 +16,25 @@ Classifier: Intended Audience :: Science/Research
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.
-Requires-Python: >=3.
+Classifier: Programming Language :: Python :: 3.10
+Requires-Python: >=3.10
 License-File: LICENSE
 Requires-Dist: liblinear-multicore>=2.49.0
 Requires-Dist: numba
 Requires-Dist: pandas>1.3.0
 Requires-Dist: PyYAML
 Requires-Dist: scikit-learn
-Requires-Dist: scipy
+Requires-Dist: scipy
 Requires-Dist: tqdm
+Requires-Dist: psutil
+Requires-Dist: sparsekmeans
 Provides-Extra: nn
-Requires-Dist: lightning
+Requires-Dist: lightning; extra == "nn"
 Requires-Dist: nltk; extra == "nn"
 Requires-Dist: torch<=2.3; extra == "nn"
 Requires-Dist: torchmetrics==0.10.3; extra == "nn"
 Requires-Dist: torchtext; extra == "nn"
-Requires-Dist: transformers; extra == "nn"
+Requires-Dist: transformers<=4.51.3; extra == "nn"
+Dynamic: license-file
 
 See documentation here: https://www.csie.ntu.edu.tw/~cjlin/libmultilabel
--- libmultilabel-0.7.3/setup.cfg
+++ libmultilabel-0.8.0/setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
 name = libmultilabel
-version = 0.7.3
+version = 0.8.0
 author = LibMultiLabel Team
 license = MIT License
 license_file = LICENSE
@@ -20,7 +20,7 @@ classifiers =
     License :: OSI Approved :: MIT License
     Operating System :: OS Independent
     Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.
+    Programming Language :: Python :: 3.10
 
 [options]
 packages = find:
@@ -30,18 +30,20 @@ install_requires =
     pandas>1.3.0
     PyYAML
     scikit-learn
-    scipy
+    scipy
     tqdm
-python_requires = >=3.
+    psutil
+    sparsekmeans
+python_requires = >=3.10
 
 [options.extras_require]
 nn =
-    lightning
+    lightning
     nltk
     torch<=2.3
     torchmetrics==0.10.3
     torchtext
-    transformers
+    transformers<=4.51.3
 
 [options.packages.find]
 exclude =