aimodelshare 0.1.30__py3-none-any.whl → 0.1.32__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aimodelshare might be problematic.
- aimodelshare/aimsonnx.py +112 -174
- aimodelshare/data_sharing/download_data.py +0 -1
- {aimodelshare-0.1.30.dist-info → aimodelshare-0.1.32.dist-info}/METADATA +1 -1
- {aimodelshare-0.1.30.dist-info → aimodelshare-0.1.32.dist-info}/RECORD +7 -7
- {aimodelshare-0.1.30.dist-info → aimodelshare-0.1.32.dist-info}/WHEEL +0 -0
- {aimodelshare-0.1.30.dist-info → aimodelshare-0.1.32.dist-info}/licenses/LICENSE +0 -0
- {aimodelshare-0.1.30.dist-info → aimodelshare-0.1.32.dist-info}/top_level.txt +0 -0
aimodelshare/aimsonnx.py
CHANGED
@@ -549,197 +549,145 @@ def _pyspark_to_onnx(model, initial_types, spark_session,
     return onx

 def _keras_to_onnx(model, transfer_learning=None,
-
-    '''
+                   deep_learning=None, task_type=None, epochs=None):
+    '''Converts a Keras model to ONNX and extracts metadata.'''

-
-
+    import tf2onnx
+    import tensorflow as tf
+    import numpy as np
+    import onnx
+    import pickle
+    import psutil
+    import warnings
+    from pympler import asizeof
+    import logging
+    import os
+    import sys
+    from contextlib import contextmanager
+
+    # -- Helper to suppress tf2onnx stderr (NumPy warnings etc.)
+    @contextmanager
+    def suppress_stderr():
+        with open(os.devnull, "w") as devnull:
+            old_stderr = sys.stderr
+            sys.stderr = devnull
+            try:
+                yield
+            finally:
+                sys.stderr = old_stderr
+
+    # Reduce logging output
+    tf2onnx_logger = logging.getLogger("tf2onnx")
+    tf2onnx_logger.setLevel(logging.CRITICAL)
+
+    # Unwrap scikeras, sklearn pipelines etc.
+    from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
+    from sklearn.pipeline import Pipeline
+    from scikeras.wrappers import KerasClassifier, KerasRegressor

-    # handle keras models in sklearn wrapper
     if isinstance(model, (GridSearchCV, RandomizedSearchCV)):
         model = model.best_estimator_
-
-    if isinstance(model, sklearn.pipeline.Pipeline):
+    if isinstance(model, Pipeline):
         model = model.steps[-1][1]
-
-    sklearn_wrappers = (KerasClassifier,KerasRegressor)
-
-    if isinstance(model, sklearn_wrappers):
+    if isinstance(model, (KerasClassifier, KerasRegressor)):
         model = model.model
-
-    # convert to onnx
-    #onx = convert_keras(model)
-    # generate tempfile for onnx object
-    temp_dir = tempfile.mkdtemp()
-
-
-
-
-    tf.get_logger().setLevel('ERROR') # probably not good practice
-    output_path = os.path.join(temp_dir, 'temp.onnx')
-
-
-    tf.saved_model.save(model, temp_dir)

-    #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    modelstringtest="python -m tf2onnx.convert --tflite "+os.path.join(temp_dir,'tempmodel.tflite')+" --output "+output_path+" --opset 13"
-    resultonnx2=os.system(modelstringtest)
-    pass
-
-    if any([resultonnx==0, resultonnx2==0]):
-        pass
-    else:
-        return print("Model conversion to onnx unsuccessful. Please try different model or submit\npredictions to leaderboard without submitting preprocessor or model files.")
-
-    onx = onnx.load(output_path)
-
-
-    # generate metadata dict
-    metadata = {}
-
-    # placeholders, need to be generated elsewhere
-    metadata['model_id'] = None
-    metadata['data_id'] = None
-    metadata['preprocessor_id'] = None
-
-    # infer ml framework from function call
-    metadata['ml_framework'] = 'keras'
-
-    # get model type from model object
-    metadata['model_type'] = str(model.__class__.__name__)
-
-    # get transfer learning bool from user input
-    metadata['transfer_learning'] = transfer_learning
-
-    # get deep learning bool from user input
-    metadata['deep_learning'] = deep_learning
-
-    # get task type from user input
-    metadata['task_type'] = task_type
-
-    # placeholders, need to be inferred from data
-    metadata['target_distribution'] = None
-    metadata['input_type'] = None
-    metadata['input_shape'] = None
-    metadata['input_dtypes'] = None
-    metadata['input_distribution'] = None
+    # Input signature
+    input_shape = model.input_shape
+    if isinstance(input_shape, list):
+        input_shape = input_shape[0]
+    input_signature = [tf.TensorSpec(input_shape, tf.float32, name="input")]
+
+    # Wrap model in tf.function
+    @tf.function(input_signature=input_signature)
+    def model_fn(x):
+        return model(x)
+
+    concrete_func = model_fn
+
+    # Convert to ONNX
+    with suppress_stderr():
+        onx_model, _ = tf2onnx.convert.from_function(
+            concrete_func,
+            input_signature=input_signature,
+            opset=13,
+            output_path=None
+        )

-    #
-    metadata
+    # Extract metadata
+    metadata = {
+        'model_id': None,
+        'data_id': None,
+        'preprocessor_id': None,
+        'ml_framework': 'keras',
+        'model_type': model.__class__.__name__,
+        'transfer_learning': transfer_learning,
+        'deep_learning': deep_learning,
+        'task_type': task_type,
+        'target_distribution': None,
+        'input_type': None,
+        'input_shape': input_shape,
+        'input_dtypes': None,
+        'input_distribution': None,
+        'model_config': str(model.get_config()),
+        'model_state': None,
+        'eval_metrics': None,
+        'model_graph': "",
+        'metadata_onnx': None,
+        'epochs': epochs
+    }

-    # get model weights from keras object
     model_size = asizeof.asizeof(model.get_weights())
     mem = psutil.virtual_memory()

-    if model_size > mem.available:
-
-        warnings.warn(f"Model size ({model_size/1e6} MB) exceeds available memory ({mem.available/1e6} MB). Skipping extraction of model weights.")
-
+    if model_size > mem.available:
+        warnings.warn(f"Model size ({model_size/1e6} MB) exceeds available memory.")
         metadata['model_weights'] = None
-
-    else:
-
+    else:
         metadata['model_weights'] = pickle.dumps(model.get_weights())

-    #
-    metadata['model_state'] = None
-
-    # get list of current layer types
-    layer_list, activation_list = _get_layer_names()
+    # Extract architecture

-
+    keras_layers = keras_unpack(model)
     layers = []
     layers_n_params = []
     layers_shapes = []
     activations = []

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    if hasattr(model, 'optimizer'):
-        optimizer = model.optimizer.__class__.__name__
-    else:
-        optimizer = None
-
-    model_summary_pd = model_summary_keras(model)
-
-    # insert data into model architecture dict
-    model_architecture = {'layers_number': len(layers),
-                          'layers_sequence': layers,
-                          'layers_summary': {i:layers.count(i) for i in set(layers)},
-                          'layers_n_params': layers_n_params,
-                          'layers_shapes': layers_shapes,
-                          'activations_sequence': activations,
-                          'activations_summary': {i:activations.count(i) for i in set(activations)},
-                          'loss':loss,
-                          'optimizer': optimizer
-                          }
+    for layer in keras_layers:
+        layers.append(layer.__class__.__name__)
+        layers_n_params.append(layer.count_params())
+        layers_shapes.append(getattr(layer, 'output_shape', None))
+        if hasattr(layer, 'activation'):
+            act = getattr(layer.activation, '__name__', None)
+            if act: activations.append(act)
+
+    optimizer = getattr(model.optimizer, '__class__', None)
+    loss = getattr(model.loss, '__class__', None)
+
+    model_architecture = {
+        'layers_number': len(layers),
+        'layers_sequence': layers,
+        'layers_summary': {i: layers.count(i) for i in set(layers)},
+        'layers_n_params': layers_n_params,
+        'layers_shapes': layers_shapes,
+        'activations_sequence': activations,
+        'activations_summary': {i: activations.count(i) for i in set(activations)},
+        'loss': loss.__name__ if loss else None,
+        'optimizer': optimizer.__name__ if optimizer else None
+    }

     metadata['model_architecture'] = str(model_architecture)
-
-
-    metadata['model_summary'] = model_summary_pd.to_json()
-
+    metadata['model_summary'] = model_summary_keras(model).to_json()
     metadata['memory_size'] = model_size

-    metadata
-
-    # model graph
-    #G = model_graph_keras(model)
-    #metadata['model_graph'] = G.create_dot().decode('utf-8')
-    metadata['model_graph'] = ""
-    # placeholder, needs evaluation engine
-    metadata['eval_metrics'] = None
-
-    # add metadata from onnx object
-    # metadata['metadata_onnx'] = str(_extract_onnx_metadata(onx, framework='keras'))
-    metadata['metadata_onnx'] = None
-    # add metadata dict to onnx object
-
-    meta = onx.metadata_props.add()
+    # Embed metadata in ONNX
+    meta = onx_model.metadata_props.add()
     meta.key = 'model_metadata'
     meta.value = str(metadata)

-    return
+    return onx_model
+


 def _pytorch_to_onnx(model, model_input, transfer_learning=None,
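For orientation, the conversion path introduced in this hunk can be exercised on its own roughly as follows. This is a minimal sketch, not code from the package: the toy Sequential model, its (4,) input shape, and the metadata values are illustrative, and it assumes a TensorFlow/Keras and tf2onnx combination where tf2onnx.convert.from_function is supported.

# Minimal sketch of the new in-memory conversion path (toy model; not package code).
import tensorflow as tf
import tf2onnx

model = tf.keras.Sequential([
    tf.keras.Input(shape=(4,)),                   # illustrative input shape
    tf.keras.layers.Dense(8, activation="relu"),
    tf.keras.layers.Dense(1),
])

# Keras reports input_shape with a leading batch dimension, e.g. (None, 4)
input_signature = [tf.TensorSpec(model.input_shape, tf.float32, name="input")]

@tf.function(input_signature=input_signature)
def model_fn(x):
    return model(x)

# Returns an onnx.ModelProto plus external tensor storage (unused here)
onnx_model, _ = tf2onnx.convert.from_function(
    model_fn, input_signature=input_signature, opset=13, output_path=None
)

# Metadata is attached the same way the diff does it: a metadata_props entry
meta = onnx_model.metadata_props.add()
meta.key = "model_metadata"
meta.value = str({"ml_framework": "keras", "model_type": model.__class__.__name__})

Keeping output_path=None returns the ONNX model in memory, which is what lets the new code drop the SavedModel/TFLite round trip through temp files and os.system calls that the 0.1.30 version relied on.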
@@ -1844,22 +1792,12 @@ def torch_unpack(model):


 def keras_unpack(model):
-
     layers = []
-
     for module in model.layers:
-
         if isinstance(module, (tf.keras.Model, tf.keras.Sequential)):
-
-            layers_out = keras_unpack(module)
-
-            layers = layers + layers_out
-
-
+            layers += keras_unpack(module)
         else:
-
             layers.append(module)
-
     return layers


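The simplified keras_unpack can be checked in isolation with a small nested model. The function body below is copied from the diff; the nested toy Sequential is an assumption for illustration only.

# Sketch: flattening a nested Keras model with the simplified keras_unpack.
import tensorflow as tf

def keras_unpack(model):
    layers = []
    for module in model.layers:
        if isinstance(module, (tf.keras.Model, tf.keras.Sequential)):
            layers += keras_unpack(module)   # recurse into nested containers
        else:
            layers.append(module)
    return layers

inner = tf.keras.Sequential([tf.keras.layers.Dense(4), tf.keras.layers.ReLU()])
outer = tf.keras.Sequential([inner, tf.keras.layers.Dense(1)])

print([layer.__class__.__name__ for layer in keras_unpack(outer)])
# Expected: ['Dense', 'ReLU', 'Dense'] -- the nested Sequential is flattened away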
aimodelshare/data_sharing/download_data.py
CHANGED

@@ -89,7 +89,6 @@ def pull_image(image_uri):

     resp = requests.get('https://{}/v2/{}/manifests/{}'.format(registry, repository, tag), headers=auth_head, verify=False)

-    print(resp.json())
     config = resp.json()['config']['digest']
     config_resp = requests.get('https://{}/v2/{}/blobs/{}'.format(registry, repository, config), headers=auth_head, verify=False)

{aimodelshare-0.1.30.dist-info → aimodelshare-0.1.32.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aimodelshare
-Version: 0.1.30
+Version: 0.1.32
 Summary: Deploy locally saved machine learning models to a live rest API and web-dashboard. Share it with the world via modelshare.org
 Home-page: https://www.modelshare.org
 Author: Michael Parrott
{aimodelshare-0.1.30.dist-info → aimodelshare-0.1.32.dist-info}/RECORD
CHANGED

@@ -1,6 +1,6 @@
 aimodelshare/README.md,sha256=_OMdUIeIYZnpFlKdafM1KNWaANO2nWdx0QpLE_ZC-Qs,2014
 aimodelshare/__init__.py,sha256=CS0iFxgAic21gBcQE6NSZ-D_ElHw80_A3OWrEYo9Dks,539
-aimodelshare/aimsonnx.py,sha256=
+aimodelshare/aimsonnx.py,sha256=rGhnNBonyrwpNgkuPTJ0hJFx_RGHQnNsCeAERZsPdcU,67129
 aimodelshare/api.py,sha256=jeCIMbpBllTYi-bPvCdNvI6yHaT3JYakq9fgaz10s_E,34920
 aimodelshare/aws.py,sha256=jn99R9-N77Qac-_eYm-LaCQUPd-RnE7oVULm9rh-3RY,15232
 aimodelshare/aws_client.py,sha256=Ce19iwf69BwpuyyJlVN8z1da3c5jf93svsTgx1OWhaA,6784
@@ -31,7 +31,7 @@ aimodelshare/containerization_templates/lambda_function.txt,sha256=nEFoPDXemNcQZ
 aimodelshare/custom_approach/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 aimodelshare/custom_approach/lambda_function.py,sha256=d1HZlgviHZq4mNBKx4q-RCunDK8P8i9DKZcfv6Nmgzc,479
 aimodelshare/data_sharing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-aimodelshare/data_sharing/download_data.py,sha256=
+aimodelshare/data_sharing/download_data.py,sha256=SshJCz3jJaL1gqZmWjFMOKzUVhijqsisnPGA6jYXbq8,22795
 aimodelshare/data_sharing/share_data.py,sha256=dMOP0-PTSpviOeHi3Nvj-uiq5PlIfk_SN5nN92j4PnI,13964
 aimodelshare/data_sharing/utils.py,sha256=865lN8-oGFi_U_zRaNnGB8Bd0sC8dN_iI5krZOSt_Ts,236
 aimodelshare/data_sharing/data_sharing_templates/Dockerfile.txt,sha256=27wmp7b0rXqJQsumhPxCvGHmUcDiiVgrC6i7DmY7KQA,77
@@ -140,11 +140,11 @@ aimodelshare/sam/codepipeline_policies.txt,sha256=267HMXMnbP7qRASkmFZYSx-2HmKf5o
 aimodelshare/sam/codepipeline_trust_relationship.txt,sha256=yfPYvZlN3fnaIHs7I3ENMMveigIE89mufV9pvR8EQH8,245
 aimodelshare/sam/spark-class.txt,sha256=chyJBxDzCzlUKXzVQYTzuJ2PXCTwg8_gd1yfnI-xbRw,217
 aimodelshare/sam/template.txt,sha256=JKSvEOZNaaLalHSx7r9psJg_6LLCb0XLAYi1-jYPu3M,1195
-aimodelshare-0.1.30.dist-info/licenses/LICENSE,sha256=
+aimodelshare-0.1.32.dist-info/licenses/LICENSE,sha256=JXBYLriXYgTloZs-9CJPZY76dqkuDT5df_HghMnljx8,1134
 tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tests/test_aimsonnx.py,sha256=-GOF1_qXGQaMxHyqK0GPg7dD1meE-S7CZea4pLmBDTk,3906
 tests/test_playground.py,sha256=vdFWPRrZNQ2poiBOoN3l7HsXB5yc3p3rrrclNYJHnaw,24574
-aimodelshare-0.1.30.dist-info/METADATA,sha256=
-aimodelshare-0.1.30.dist-info/WHEEL,sha256=
-aimodelshare-0.1.30.dist-info/top_level.txt,sha256=
-aimodelshare-0.1.30.dist-info/RECORD,,
+aimodelshare-0.1.32.dist-info/METADATA,sha256=4tr51rAZknBEIiALek1441tjpMArQd3xn-5zAZ5jElQ,3523
+aimodelshare-0.1.32.dist-info/WHEEL,sha256=pxyMxgL8-pra_rKaQ4drOZAegBVuX-G_4nRHjjgWbmo,91
+aimodelshare-0.1.32.dist-info/top_level.txt,sha256=2KJgeHQ0BmZuilB75J203i7W4vri6CON2kdbwk9BNpU,19
+aimodelshare-0.1.32.dist-info/RECORD,,
{aimodelshare-0.1.30.dist-info → aimodelshare-0.1.32.dist-info}/WHEEL
File without changes

{aimodelshare-0.1.30.dist-info → aimodelshare-0.1.32.dist-info}/licenses/LICENSE
File without changes

{aimodelshare-0.1.30.dist-info → aimodelshare-0.1.32.dist-info}/top_level.txt
File without changes