ddi-fw 0.0.131__py3-none-any.whl → 0.0.133__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ddi_fw/ml/ml_helper.py +45 -15
- ddi_fw/ml/model_wrapper.py +3 -3
- ddi_fw/ml/tensorflow_wrapper.py +67 -72
- ddi_fw/pipeline/multi_pipeline.py +15 -11
- ddi_fw/pipeline/pipeline.py +8 -4
- ddi_fw/test/__init__.py +0 -0
- {ddi_fw-0.0.131.dist-info → ddi_fw-0.0.133.dist-info}/METADATA +1 -1
- {ddi_fw-0.0.131.dist-info → ddi_fw-0.0.133.dist-info}/RECORD +10 -9
- {ddi_fw-0.0.131.dist-info → ddi_fw-0.0.133.dist-info}/WHEEL +0 -0
- {ddi_fw-0.0.131.dist-info → ddi_fw-0.0.133.dist-info}/top_level.txt +0 -0
ddi_fw/ml/ml_helper.py
CHANGED
@@ -3,6 +3,7 @@ from matplotlib import pyplot as plt
 from ddi_fw.ml.model_wrapper import Result
 from ddi_fw.ml.pytorch_wrapper import PTModelWrapper
 from ddi_fw.ml.tensorflow_wrapper import TFModelWrapper
+from ddi_fw.utils.package_helper import get_import
 import tensorflow as tf
 from tensorflow import keras
 from keras.models import Model, Sequential
@@ -30,11 +31,9 @@ import ddi_fw.utils as utils
 
 class MultiModalRunner:
     # todo model related parameters to config
-    def __init__(self, library
+    def __init__(self, library, multi_modal):
         self.library = library
-        self.
-        self.batch_size = batch_size
-        self.epochs = epochs
+        self.epochs = multi_modal
         self.result = Result()
 
     def set_data(self, items, train_idx_arr, val_idx_arr, y_test_label):
@@ -43,7 +42,7 @@ class MultiModalRunner:
         self.val_idx_arr = val_idx_arr
         self.y_test_label = y_test_label
 
-    def
+    def __create_model(self,library):
         if library == 'tensorflow':
             return TFModelWrapper
         elif library == 'pytorch':
@@ -66,18 +65,49 @@ class MultiModalRunner:
 
         with mlflow.start_run(run_name=self.prefix, description="***") as run:
             self.level_0_run_id = run.info.run_id
-            for
-
-
-
-
-
+            item_dict = {t[0]: t for t in self.items}
+            for m in self.multi_modal:
+                name = m.get('name')
+                input_type = m.get('input_type')
+                input = m.get('input')
+                inputs = m.get('inputs')
+                model_type = get_import(m.get("model_type"))
+                kwargs = m.get('params')
+                T = self.__create_model(self.library)
+                single_modal=T(self.date, name, model_type, **kwargs)
+                if input_type == '1D':
+                    item = item_dict[input]
+                    single_modal.set_data(
+                        self.train_idx_arr, self.val_idx_arr, item[1], item[2], item[3], item[4])
+                elif input_type == '2D':
+                    filtered_dict = {k: item_dict[k] for k in inputs if k in item_dict}
+                    first_input = next(iter(item_dict.values()))
+                    train_data_list = [f[1] for f in filtered_dict.values()]
+                    test_data_list = [f[3] for f in filtered_dict.values()]
+                    train_data = np.stack(train_data_list, axis=1)
+                    test_data = np.stack(test_data_list, axis=1)
+                    train_label = first_input[2]
+                    test_label = first_input[2]
+                    single_modal.set_data(
+                        self.train_idx_arr, self.val_idx_arr, train_data, train_label, test_data, test_label)
                 logs, metrics, prediction = single_modal.predict()
-                # self.result.add_log(item[0], logs)
-                #Check
                 self.result.add_metric(item[0], metrics)
-                single_results[
-
+                single_results[name] = prediction
+
+
+            # for item in self.items:
+            #     print(item[0])
+            #     T = self.__create_model(self.library)
+            #     # parameters of model should be dictionary
+            #     single_modal=T(self.date, item[0], self.model_func, self.batch_size, self.epochs)
+            #     single_modal.set_data(
+            #         self.train_idx_arr, self.val_idx_arr, item[1], item[2], item[3], item[4])
+            #     logs, metrics, prediction = single_modal.predict()
+            #     # self.result.add_log(item[0], logs)
+            #     #Check
+            #     self.result.add_metric(item[0], metrics)
+            #     single_results[item[0]] = prediction
+            # # sum = sum + prediction
 
         if combinations:
             self.evaluate_combinations(single_results, combinations)
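Note on the new run loop: each single-modal model is now described by an entry in a multi_modal configuration list instead of the shared model_func/batch_size/epochs fields. Below is a minimal sketch of what one entry could look like, inferred only from the keys the loop reads (name, input_type, input, inputs, model_type, params); all values are illustrative, not taken from the package.

# Hypothetical multi_modal configuration; "model_type" is resolved to a
# callable via get_import, and "params" is forwarded to the wrapper as **kwargs.
multi_modal = [
    {
        "name": "text_embedding",
        "input_type": "1D",                  # one entry of self.items, selected by "input"
        "input": "text_embedding",
        "model_type": "my_models.build_dense_model",
        "params": {"batch_size": 128, "epochs": 100},
    },
    {
        "name": "fused",
        "input_type": "2D",                  # several items stacked along a new axis
        "inputs": ["text_embedding", "smiles_embedding"],
        "model_type": "my_models.build_conv_model",
        "params": {"batch_size": 64, "epochs": 50},
    },
]

In the '2D' branch, np.stack(train_data_list, axis=1) turns n arrays of shape (samples, dim) into a single array of shape (samples, n, dim), so the fused model sees all selected modalities for each sample.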
ddi_fw/ml/model_wrapper.py
CHANGED
@@ -15,12 +15,12 @@ class Result:
 
 
 class ModelWrapper:
-    def __init__(self, date, descriptor, model_func
+    def __init__(self, date, descriptor, model_func ,**kwargs):
         self.date = date
         self.descriptor = descriptor
         self.model_func = model_func
-        self.
-
+        self.kwargs = kwargs
+
 
     def set_data(self, train_idx_arr, val_idx_arr, train_data, train_label, test_data, test_label):
         self.train_idx_arr = train_idx_arr
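ModelWrapper now captures arbitrary keyword arguments in self.kwargs rather than fixed fields, so subclasses pick out what they need. A small sketch of the pattern; the subclass and its hidden_units parameter are hypothetical, not part of the package:

from ddi_fw.ml.model_wrapper import ModelWrapper

class DemoWrapper(ModelWrapper):  # hypothetical subclass for illustration
    def __init__(self, date, descriptor, model_func, **kwargs):
        super().__init__(date, descriptor, model_func, **kwargs)
        # pull out what this wrapper cares about; the rest stays in self.kwargs
        self.hidden_units = kwargs.get("hidden_units", 64)

w = DemoWrapper("2024-01-01", "demo", model_func=None, hidden_units=128, epochs=10)
print(w.kwargs)  # {'hidden_units': 128, 'epochs': 10}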
ddi_fw/ml/tensorflow_wrapper.py
CHANGED
@@ -1,7 +1,7 @@
 from ddi_fw.ml.model_wrapper import ModelWrapper
 import tensorflow as tf
 from tensorflow import keras
-from keras.callbacks import EarlyStopping,ModelCheckpoint
+from keras.callbacks import EarlyStopping, ModelCheckpoint
 from sklearn.model_selection import train_test_split, KFold, StratifiedKFold
 import numpy as np
 
@@ -15,91 +15,86 @@ from ddi_fw.ml.evaluation_helper import Metrics, evaluate
 # import onnx
 
 import ddi_fw.utils as utils
+import os
 
 
 class TFModelWrapper(ModelWrapper):
-
-    def
+
+    def __init__(self, date, descriptor, model_func, **kwargs):
+        super().__init__(date, descriptor, model_func, **kwargs)
+        self.batch_size = kwargs.get('batch_size',128)
+        self.epochs = kwargs.get('epochs',100)
+
+    def fit_model(self, X_train, y_train, X_valid, y_valid):
+        self.kwargs['input_shape'] = self.train_data.shape
+        model = self.model_func(**self.kwargs)
+        checkpoint = ModelCheckpoint(
+            filepath=f'{self.descriptor}_validation.weights.h5',
+            monitor='val_loss',
+            save_best_only=True,
+            save_weights_only=True,
+            verbose=1,
+            mode='min'
+        )
+        early_stopping = EarlyStopping(
+            monitor='val_loss', patience=10, mode='auto')
+        custom_callback = CustomCallback()
+
+        history = model.fit(
+            X_train, y_train,
+            batch_size=self.batch_size,
+            epochs=self.epochs,
+            validation_data=(X_valid, y_valid),
+            callbacks=[early_stopping, checkpoint, custom_callback]
+        )
+
+        if os.path.exists(f'{self.descriptor}_validation.weights.h5'):
+            os.remove(f'{self.descriptor}_validation.weights.h5')
+
+        return model, checkpoint
+
+    def fit(self):
         print(self.train_data.shape)
+        models = {}
+        models_val_acc = {}
+        for i, (train_idx, val_idx) in enumerate(zip(self.train_idx_arr, self.val_idx_arr)):
+            print(f"Validation {i}")
+            with mlflow.start_run(run_name=f'Validation {i}', description='CV models', nested=True) as cv_fit:
+                X_train_cv = self.train_data[train_idx]
+                y_train_cv = self.train_label[train_idx]
+                X_valid_cv = self.train_data[val_idx]
+                y_valid_cv = self.train_label[val_idx]
+                model, best_val_acc = self.fit_model(
+                    X_train_cv, y_train_cv, X_valid_cv, y_valid_cv)
+                models[f'validation_{i}'] = model
+                models_val_acc[f'{self.descriptor}_validation_{i}'] = best_val_acc
+
+        best_model_key = max(models_val_acc, key=models_val_acc.get)
+        best_model = models[best_model_key]
+        return best_model, best_model_key
 
-
-
-
-
-
-
-
-            print(f"Validation {i}")
-
-            with mlflow.start_run(run_name=f'Validation {i}', description='CV models', nested=True) as cv_fit:
-                model = self.model_func(self.train_data.shape[1])
-                models[f'validation_{i}'] = model
-                X_train_cv = self.train_data[train_idx]
-                y_train_cv = self.train_label[train_idx]
-                X_valid_cv = self.train_data[val_idx]
-                y_valid_cv = self.train_label[val_idx]
-
-                checkpoint = ModelCheckpoint(
-                    filepath=f'{self.descriptor}_validation_{i}.weights.h5',
-                    monitor='val_loss',
-                    save_best_only=True,
-                    save_weights_only=True,
-                    verbose=1,
-                    mode='min'
-                )
-
-                early_stopping = EarlyStopping(
-                    monitor='val_loss', patience=10, verbose=0, mode='auto')
-                custom_callback = CustomCallback()
-                history = model.fit(X_train_cv, y_train_cv,
-                                    batch_size=self.batch_size,
-                                    epochs=self.epochs,
-                                    validation_data=(
-                                        X_valid_cv, y_valid_cv),
-                                    callbacks=[early_stopping, checkpoint, custom_callback])
-                # histories[f'validation_{i}'] = history
-                # models_val_acc[f'validation_{i}'] = history.history['val_accuracy'][-1]
-                models_val_acc[f'{self.descriptor}_validation_{i}'] = checkpoint.best
-                models[f'{self.descriptor}_validation_{i}'] = checkpoint.model
-                import os
-                if os.path.exists(f'{self.descriptor}_validation_{i}.weights.h5'):
-                    os.remove(f'{self.descriptor}_validation_{i}.weights.h5')
-                # Saving each CV model
-
-        best_model_key = max(models_val_acc, key=models_val_acc.get)
-        best_model = models[best_model_key]
-        # mlflow.tensorflow.log_model(best_model, "model")
-        # best_model.evaluate(self.test_data, self.test_label,
-        #                     callbacks=[custom_callback])
-        pred = best_model.predict(self.test_data)
+    # https://github.com/mlflow/mlflow/blob/master/examples/tensorflow/train.py
+
+    def predict(self, best_model):
+        pred = best_model.predict(self.test_data)
+        return pred
+
+    def fit_and_evaluate(self):
 
+        with mlflow.start_run(run_name=self.descriptor, description="***", nested=True) as run:
+            print(run.info.artifact_uri)
+            best_model, best_model_key = self.fit()
+            pred = self.predict(best_model)
             logs, metrics = evaluate(
                 actual=self.test_label, pred=pred, info=self.descriptor)
             metrics.format_float()
             mlflow.log_metrics(logs)
             mlflow.log_param('best_cv', best_model_key)
-
-            # self.train_data,
-            # # generate_signature_output(model,X_valid_cv)
-            # # params=params,
-            # )
-
-            # mlflow.keras.save_model(
-            #     best_model,
-            #     path=run.info.artifact_uri + '/model',
-            #     signature=signature,
-            # )
-            print(run.info.artifact_uri)
-            # todo tf2onnx not compatible with keras > 2.15
-            # onnx_model, _ = tf2onnx.convert.from_keras(
-            #     best_model, input_signature=None, opset=13)
-            # onnx.save(onnx_model, run.info.artifact_uri +
-            #     '/model/model.onnx')
+
             utils.compress_and_save_data(
                 metrics.__dict__, run.info.artifact_uri, f'{self.date}_metrics.gzip')
 
-
-
+        return logs, metrics, pred
 
 class CustomCallback(keras.callbacks.Callback):
     def on_train_begin(self, logs=None):
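The 0.0.131 wrapper did cross-validation, checkpointing, prediction, and MLflow logging in one method; 0.0.133 splits this into fit_model (train one fold with checkpointing and early stopping), fit (loop over folds and keep the best model), predict, and fit_and_evaluate (an MLflow run around the whole cycle, returning logs, metrics, pred). Note that ml_helper.py above still calls single_modal.predict() with no arguments and unpacks three values, which no longer matches the new predict(self, best_model) signature in this version; fit_and_evaluate is the method that returns the triple. A rough usage sketch under stated assumptions: build_model is a hypothetical builder; fit_model injects kwargs['input_shape'] = self.train_data.shape before calling it, and batch_size/epochs are read from kwargs with defaults 128/100.

import tensorflow as tf
from ddi_fw.ml.tensorflow_wrapper import TFModelWrapper

def build_model(input_shape, **params):    # hypothetical; receives the full kwargs dict
    model = tf.keras.Sequential([
        tf.keras.layers.Input(shape=input_shape[1:]),   # drop the sample axis
        tf.keras.layers.Dense(64, activation="relu"),
        tf.keras.layers.Dense(1, activation="sigmoid"),
    ])
    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    return model

wrapper = TFModelWrapper("2024-01-01", "demo", build_model,
                         batch_size=64, epochs=20)   # picked up via kwargs.get(...)
# wrapper.set_data(train_idx_arr, val_idx_arr, train_data, train_label,
#                  test_data, test_label)
# logs, metrics, pred = wrapper.fit_and_evaluate()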
ddi_fw/pipeline/multi_pipeline.py
CHANGED
@@ -49,8 +49,8 @@ class MultiPipeline():
     def __create_pipeline(self, config):
         type = config.get("type")
         library = config.get("library")
-        batch_size = config.get("batch_size")
-        epochs = config.get("epochs")
+        # batch_size = config.get("batch_size")
+        # epochs = config.get("epochs")
 
         # dataset_module = config.get("dataset_module")
         # dataset_name = config.get("dataset_name")
@@ -60,6 +60,8 @@ class MultiPipeline():
         experiment_tags = config.get("experiment_tags")
         tracking_uri = config.get("tracking_uri")
         artifact_location = config.get("artifact_location")
+        #new
+        multi_modal = config.get("multi_modal")
         columns = config.get("columns")
         ner_data_file = config.get("ner_data_file")
         ner_threshold = config.get("ner_threshold")
@@ -69,8 +71,9 @@ class MultiPipeline():
         embedding_pooling_strategy = get_import(
             config.get("embedding_pooling_strategy_type")) if config.get("embedding_pooling_strategy_type") else None
         # Dynamically import the model and dataset classes
-        model_type = get_import(config.get("model_type"))
+        # model_type = get_import(config.get("model_type"))
         dataset_type = get_import(config.get("dataset_type"))
+
         combination_type = None
         kwargs_combination_params=None
         if config.get("combination_strategy"):
@@ -100,7 +103,8 @@ class MultiPipeline():
                 embedding_pooling_strategy_type=embedding_pooling_strategy,
                 ner_data_file=ner_data_file,
                 ner_threshold=ner_threshold,
-                combinations=combinations
+                combinations=combinations,
+                multi_modal= multi_modal)
         elif type== "ner_search":
             pipeline = NerParameterSearch(
                 library=library,
@@ -119,9 +123,9 @@ class MultiPipeline():
         return {
             "name": experiment_name,
             "library": library,
-            "batch_size": batch_size,
-            "epochs": epochs,
-            "model_type": model_type,
+            # "batch_size": batch_size,
+            # "epochs": epochs,
+            # "model_type": model_type,
             "pipeline": pipeline}
 
     def build(self):
@@ -134,12 +138,12 @@ class MultiPipeline():
         for item in self.items:
             print(f"{item['name']} is running")
             pipeline = item['pipeline']
-            model_type = item['model_type']
-            batch_size = item['batch_size']
-            epochs = item['epochs']
+            # model_type = item['model_type']
+            # batch_size = item['batch_size']
+            # epochs = item['epochs']
             # It can be moved to build function
             pipeline.build()
-            result = pipeline.run(
+            result = pipeline.run()
             self.pipeline_resuts[item['name']] = result
         return self
 
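With batch_size, epochs, and model_type dropped from the top-level experiment config, per-modality settings now live under the new multi_modal key. A sketch of the shape __create_pipeline appears to expect, using only keys visible in this diff; the values and the dataset path are illustrative:

# Hypothetical experiment config entry; multi_modal is the list sketched under ml_helper.py.
config = {
    "type": "general",              # exact value checked for the Pipeline branch isn't shown in this diff
    "library": "tensorflow",
    "experiment_name": "ddi-demo",
    "tracking_uri": "http://localhost:5000",
    "dataset_type": "my_pkg.datasets.MyDataset",   # resolved via get_import
    "multi_modal": multi_modal,
    # "batch_size", "epochs", and "model_type" are no longer read here
}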
ddi_fw/pipeline/pipeline.py
CHANGED
@@ -28,7 +28,8 @@ class Pipeline:
                  ner_data_file=None,
                  ner_threshold=None,
                  combinations=None,
-                 model=None
+                 model=None,
+                 multi_modal = None ):
         self.library = library
         self.experiment_name = experiment_name
         self.experiment_description = experiment_description
@@ -46,6 +47,7 @@ class Pipeline:
         self.ner_threshold = ner_threshold
         self.combinations = combinations
         self.model = model
+        self.multi_modal = multi_modal
 
     def __create_or_update_embeddings__(self, embedding_dict, vector_db_persist_directory, vector_db_collection_name, column=None):
         """
@@ -155,6 +157,7 @@ class Pipeline:
         self.train_idx_arr = self.dataset.train_idx_arr
         self.val_idx_arr = self.dataset.val_idx_arr
         # Logic to set up the experiment
+        # column name, train data, train label, test data, test label
         self.items = self.dataset.produce_inputs()
 
         unique_classes = pd.unique(self.dataframe['event_category'])
@@ -168,7 +171,7 @@ class Pipeline:
         # Implement additional build logic as needed
         return self
 
-    def run(self
+    def run(self):
         mlflow.set_tracking_uri(self.tracking_uri)
 
         if mlflow.get_experiment_by_name(self.experiment_name) == None:
@@ -178,8 +181,9 @@ class Pipeline:
         mlflow.set_experiment(self.experiment_name)
 
         y_test_label = self.items[0][4]
-        multi_modal_runner = MultiModalRunner(
-
+        multi_modal_runner = MultiModalRunner(library=self.library, multi_modal = self.multi_modal)
+        # multi_modal_runner = MultiModalRunner(
+        #     library=self.library, model_func=model_func, batch_size=batch_size, epochs=epochs)
         # multi_modal = TFMultiModal(
         #     model_func=model_func, batch_size=batch_size, epochs=epochs) # 100
         multi_modal_runner.set_data(
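Since run() is now parameterless and the MultiModalRunner is built from self.multi_modal, driving a pipeline directly could look like the sketch below; constructor argument names beyond the self.* assignments visible in this diff are assumptions.

from ddi_fw.pipeline.pipeline import Pipeline

pipeline = Pipeline(
    library="tensorflow",
    experiment_name="ddi-demo",          # assumed names, per the self.* assignments above
    tracking_uri="http://localhost:5000",
    multi_modal=multi_modal,             # the per-modality list sketched earlier
)
pipeline.build()
result = pipeline.run()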
ddi_fw/test/__init__.py
ADDED
File without changes (new empty file)
{ddi_fw-0.0.131.dist-info → ddi_fw-0.0.133.dist-info}/RECORD
CHANGED
@@ -75,18 +75,19 @@ ddi_fw/langchain/sentence_splitter.py,sha256=h_bYElx4Ud1mwDNJfL7mUwvgadwKX3GKlSz
 ddi_fw/langchain/storage.py,sha256=OizKyWm74Js7T6Q9kez-ulUoBGzIMFo4R46h4kjUyIM,11200
 ddi_fw/ml/__init__.py,sha256=tIxiW0g6q1VsmDYVXR_ovvHQR3SCir8g2bKxx_CrS7s,221
 ddi_fw/ml/evaluation_helper.py,sha256=o4-w5Xa3t4olLW4ymx_8L-Buhe5wfQEmT2bh4Zz544c,13066
-ddi_fw/ml/ml_helper.py,sha256=
-ddi_fw/ml/model_wrapper.py,sha256=
+ddi_fw/ml/ml_helper.py,sha256=poZKeIRK4S9SJVgQzMtahHvoekwNxCARCExUeTCjARY,6162
+ddi_fw/ml/model_wrapper.py,sha256=kc01_TVJuriUvNI6ABnLngnJWvmG_Y7-XJ6XMusLJ8U,1088
 ddi_fw/ml/pytorch_wrapper.py,sha256=AkG-2sKDXr0IBhgmkbjG0i20OuwQv3mhdvqp6UvJDCA,3716
-ddi_fw/ml/tensorflow_wrapper.py,sha256=
+ddi_fw/ml/tensorflow_wrapper.py,sha256=jaWPZfsQMSG_Vzl1OTALAbgb2sdJklcgQD5kokt7b18,5409
 ddi_fw/ner/__init__.py,sha256=JwhGXrepomxPSsGsg2b_xPRC72AjvxOIn2CW5Mvscn0,26
 ddi_fw/ner/mmlrestclient.py,sha256=NZta7m2Qm6I_qtVguMZhqtAUjVBmmXn0-TMnsNp0jpg,6859
 ddi_fw/ner/ner.py,sha256=BEs9AFljAxOQrC2BEP1raSzRoypcfELS5UTdl4bjTqw,15863
 ddi_fw/pipeline/__init__.py,sha256=tKDM_rW4vPjlYTeOkNgi9PujDzb4e9O3LK1w5wqnebw,212
 ddi_fw/pipeline/multi_modal_combination_strategy.py,sha256=qIst7vxHaOAhRv4lgozszwa3b1QE4aIrN74t41Xnvr4,1637
-ddi_fw/pipeline/multi_pipeline.py,sha256=
+ddi_fw/pipeline/multi_pipeline.py,sha256=G8ONZdfwjGZRI2PrzMOaET6w5AUcmgYzMtaV6j5Hbz0,5981
 ddi_fw/pipeline/ner_pipeline.py,sha256=wB7hz4YCOv7UAz6bGE6sSpPXXIdoOflOVK5UCc1fO-o,5586
-ddi_fw/pipeline/pipeline.py,sha256
+ddi_fw/pipeline/pipeline.py,sha256=-1zGbSJapmUSx9xltJLQajmUCeZdT-9Ow0cC6JZ92y0,8984
+ddi_fw/test/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ddi_fw/test/basic_test.py,sha256=fEOGcZm1ObnsDvMiXNmdmz6YCeUrGc8V0DwlSwGhsq8,376
 ddi_fw/test/combination_test.py,sha256=TWNE8sf-DSh1Q9-yRaRBc774Sn1kSMGXLwQhd2_Qynk,324
 ddi_fw/test/compress_json_test.py,sha256=BGny56YqiG-pzhMoDzLKQBQI1E7o3jU0S7VYWtclAx4,1045
@@ -105,7 +106,7 @@ ddi_fw/utils/package_helper.py,sha256=erl8_onmhK-41zQoaED2qyDUV9GQxmT9sdoyRp9_q5
 ddi_fw/utils/py7zr_helper.py,sha256=gOqaFIyJvTjUM-btO2x9AQ69jZOS8PoKN0wetYIckJw,4747
 ddi_fw/utils/utils.py,sha256=szwnxMTDRrZoeNRyDuf3aCbtzriwtaRk4mHSH3asLdA,4301
 ddi_fw/utils/zip_helper.py,sha256=YRZA4tKZVBJwGQM0_WK6L-y5MoqkKoC-nXuuHK6CU9I,5567
-ddi_fw-0.0.
-ddi_fw-0.0.
-ddi_fw-0.0.
-ddi_fw-0.0.
+ddi_fw-0.0.133.dist-info/METADATA,sha256=LpoElNyGbr_R9jW90vl4RPpMbi9oRwi0PjYKTuLvwZM,1965
+ddi_fw-0.0.133.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+ddi_fw-0.0.133.dist-info/top_level.txt,sha256=PMwHICFZTZtcpzQNPV4UQnfNXYIeLR_Ste-Wfc1h810,7
+ddi_fw-0.0.133.dist-info/RECORD,,
{ddi_fw-0.0.131.dist-info → ddi_fw-0.0.133.dist-info}/WHEEL
File without changes
{ddi_fw-0.0.131.dist-info → ddi_fw-0.0.133.dist-info}/top_level.txt
File without changes