aimodelshare 0.1.29.tar.gz → 0.1.31.tar.gz

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release.


This version of aimodelshare might be problematic.

Files changed (156)
  1. {aimodelshare-0.1.29/aimodelshare.egg-info → aimodelshare-0.1.31}/PKG-INFO +1 -1
  2. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/aimsonnx.py +112 -163
  3. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/data_sharing/download_data.py +84 -106
  4. {aimodelshare-0.1.29 → aimodelshare-0.1.31/aimodelshare.egg-info}/PKG-INFO +1 -1
  5. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/setup.py +1 -1
  6. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/LICENSE +0 -0
  7. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/MANIFEST.in +0 -0
  8. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/README.md +0 -0
  9. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/README.md +0 -0
  10. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/__init__.py +0 -0
  11. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/api.py +0 -0
  12. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/aws.py +0 -0
  13. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/aws_client.py +0 -0
  14. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/base_image.py +0 -0
  15. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/bucketpolicy.py +0 -0
  16. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/color_mappings/color_mapping_keras.csv +0 -0
  17. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/color_mappings/color_mapping_pytorch.csv +0 -0
  18. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/containerisation.py +0 -0
  19. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/containerization.py +0 -0
  20. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/containerization_templates/Dockerfile.txt +0 -0
  21. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/containerization_templates/Dockerfile_PySpark.txt +0 -0
  22. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/containerization_templates/buildspec.txt +0 -0
  23. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/containerization_templates/lambda_function.txt +0 -0
  24. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/custom_approach/__init__.py +0 -0
  25. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/custom_approach/lambda_function.py +0 -0
  26. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/custom_eval_metrics.py +0 -0
  27. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/data_sharing/__init__.py +0 -0
  28. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/data_sharing/data_sharing_templates/Dockerfile.txt +0 -0
  29. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/data_sharing/data_sharing_templates/__init__.py +0 -0
  30. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/data_sharing/data_sharing_templates/buildspec.txt +0 -0
  31. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/data_sharing/data_sharing_templates/codebuild_policies.txt +0 -0
  32. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/data_sharing/data_sharing_templates/codebuild_trust_relationship.txt +0 -0
  33. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/data_sharing/share_data.py +0 -0
  34. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/data_sharing/utils.py +0 -0
  35. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/deploy_custom_lambda.py +0 -0
  36. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/Makefile +0 -0
  37. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/__init__.py +0 -0
  38. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/_version.py +0 -0
  39. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/breadcrumbs.html +0 -0
  40. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/layout.html +0 -0
  41. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/search.html +0 -0
  42. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/searchbox.html +0 -0
  43. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css +0 -0
  44. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css.map +0 -0
  45. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css +0 -0
  46. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css.map +0 -0
  47. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css +0 -0
  48. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css.map +0 -0
  49. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.eot +0 -0
  50. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.svg +0 -0
  51. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.ttf +0 -0
  52. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff +0 -0
  53. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff2 +0 -0
  54. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/static/js/theme.js +0 -0
  55. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/karma_sphinx_theme/theme.conf +0 -0
  56. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/make.bat +0 -0
  57. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/requirements.txt +0 -0
  58. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/about.rst +0 -0
  59. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/advanced_features.rst +0 -0
  60. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/competition.rst +0 -0
  61. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/conf.py +0 -0
  62. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/create_credentials.rst +0 -0
  63. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/example_notebooks.rst +0 -0
  64. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/functions.rst +0 -0
  65. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/gettingstarted.rst +0 -0
  66. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/images/creds1.png +0 -0
  67. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/images/creds2.png +0 -0
  68. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/images/creds3.png +0 -0
  69. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/images/creds4.png +0 -0
  70. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/images/creds5.png +0 -0
  71. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/images/creds_file_example.png +0 -0
  72. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/images/predict_tab.png +0 -0
  73. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/index.rst +0 -0
  74. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/documentation/source/modelplayground.rst +0 -0
  75. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/exceptions.py +0 -0
  76. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/generatemodelapi.py +0 -0
  77. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/iam/codebuild_policy.txt +0 -0
  78. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/iam/codebuild_trust_relationship.txt +0 -0
  79. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/iam/lambda_policy.txt +0 -0
  80. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/iam/lambda_trust_relationship.txt +0 -0
  81. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/json_templates/__init__.py +0 -0
  82. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/json_templates/api_json.txt +0 -0
  83. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/json_templates/auth/policy.txt +0 -0
  84. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/json_templates/auth/role.txt +0 -0
  85. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/json_templates/eval/policy.txt +0 -0
  86. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/json_templates/eval/role.txt +0 -0
  87. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/json_templates/function/policy.txt +0 -0
  88. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/json_templates/function/role.txt +0 -0
  89. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/json_templates/integration_response.txt +0 -0
  90. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/json_templates/lambda_policy_1.txt +0 -0
  91. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/json_templates/lambda_policy_2.txt +0 -0
  92. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/json_templates/lambda_role_1.txt +0 -0
  93. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/json_templates/lambda_role_2.txt +0 -0
  94. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/leaderboard.py +0 -0
  95. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/main/1.txt +0 -0
  96. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/main/1B.txt +0 -0
  97. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/main/2.txt +0 -0
  98. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/main/3.txt +0 -0
  99. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/main/4.txt +0 -0
  100. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/main/5.txt +0 -0
  101. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/main/6.txt +0 -0
  102. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/main/7.txt +0 -0
  103. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/main/8.txt +0 -0
  104. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/main/__init__.py +0 -0
  105. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/main/authorization.txt +0 -0
  106. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/main/eval_classification.txt +0 -0
  107. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/main/eval_lambda.txt +0 -0
  108. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/main/eval_regression.txt +0 -0
  109. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/main/lambda_function.txt +0 -0
  110. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/main/nst.txt +0 -0
  111. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/model.py +0 -0
  112. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/modeluser.py +0 -0
  113. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/placeholders/model.onnx +0 -0
  114. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/placeholders/preprocessor.zip +0 -0
  115. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/playground.py +0 -0
  116. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/postprocessormodules.py +0 -0
  117. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/preprocessormodules.py +0 -0
  118. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/pyspark/1.txt +0 -0
  119. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/pyspark/1B.txt +0 -0
  120. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/pyspark/2.txt +0 -0
  121. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/pyspark/3.txt +0 -0
  122. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/pyspark/4.txt +0 -0
  123. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/pyspark/5.txt +0 -0
  124. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/pyspark/6.txt +0 -0
  125. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/pyspark/7.txt +0 -0
  126. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/pyspark/8.txt +0 -0
  127. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/pyspark/__init__.py +0 -0
  128. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/pyspark/authorization.txt +0 -0
  129. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/pyspark/eval_classification.txt +0 -0
  130. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/pyspark/eval_lambda.txt +0 -0
  131. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/pyspark/eval_regression.txt +0 -0
  132. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/pyspark/lambda_function.txt +0 -0
  133. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/pyspark/nst.txt +0 -0
  134. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/python/my_preprocessor.py +0 -0
  135. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/readme.md +0 -0
  136. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/reproducibility.py +0 -0
  137. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/sam/Dockerfile.txt +0 -0
  138. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/sam/Dockerfile_PySpark.txt +0 -0
  139. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/sam/__init__.py +0 -0
  140. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/sam/buildspec.txt +0 -0
  141. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/sam/codebuild_policies.txt +0 -0
  142. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/sam/codebuild_trust_relationship.txt +0 -0
  143. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/sam/codepipeline_policies.txt +0 -0
  144. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/sam/codepipeline_trust_relationship.txt +0 -0
  145. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/sam/spark-class.txt +0 -0
  146. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/sam/template.txt +0 -0
  147. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/tools.py +0 -0
  148. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare/utils.py +0 -0
  149. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare.egg-info/SOURCES.txt +0 -0
  150. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare.egg-info/dependency_links.txt +0 -0
  151. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare.egg-info/requires.txt +0 -0
  152. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/aimodelshare.egg-info/top_level.txt +0 -0
  153. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/setup.cfg +0 -0
  154. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/tests/__init__.py +0 -0
  155. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/tests/test_aimsonnx.py +0 -0
  156. {aimodelshare-0.1.29 → aimodelshare-0.1.31}/tests/test_playground.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: aimodelshare
- Version: 0.1.29
+ Version: 0.1.31
  Summary: Deploy locally saved machine learning models to a live rest API and web-dashboard. Share it with the world via modelshare.org
  Home-page: https://www.modelshare.org
  Author: Michael Parrott
aimodelshare/aimsonnx.py
@@ -549,197 +549,146 @@ def _pyspark_to_onnx(model, initial_types, spark_session,
  return onx

  def _keras_to_onnx(model, transfer_learning=None,
- deep_learning=None, task_type=None, epochs=None):
- '''Extracts metadata from keras model object.'''
+ deep_learning=None, task_type=None, epochs=None):
+ '''Converts a Keras model to ONNX and extracts metadata.'''

- # check whether this is a fitted keras model
- # isinstance...
+ import tf2onnx
+ import tensorflow as tf
+ import numpy as np
+ import onnx
+ import pickle
+ import psutil
+ import warnings
+ from pympler import asizeof
+ import logging
+ import os
+ import sys
+ from contextlib import contextmanager
+
+ # -- Helper to suppress tf2onnx stderr (NumPy warnings etc.)
+ @contextmanager
+ def suppress_stderr():
+ with open(os.devnull, "w") as devnull:
+ old_stderr = sys.stderr
+ sys.stderr = devnull
+ try:
+ yield
+ finally:
+ sys.stderr = old_stderr
+
+ # Reduce logging output
+ tf2onnx_logger = logging.getLogger("tf2onnx")
+ tf2onnx_logger.setLevel(logging.CRITICAL)
+
+ # Unwrap scikeras, sklearn pipelines etc.
+ from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
+ from sklearn.pipeline import Pipeline
+ from scikeras.wrappers import KerasClassifier, KerasRegressor

- # handle keras models in sklearn wrapper
  if isinstance(model, (GridSearchCV, RandomizedSearchCV)):
  model = model.best_estimator_
-
- if isinstance(model, sklearn.pipeline.Pipeline):
+ if isinstance(model, Pipeline):
  model = model.steps[-1][1]
-
- sklearn_wrappers = (KerasClassifier,KerasRegressor)
-
- if isinstance(model, sklearn_wrappers):
+ if isinstance(model, (KerasClassifier, KerasRegressor)):
  model = model.model
-
- # convert to onnx
- #onx = convert_keras(model)
- # generate tempfile for onnx object
- temp_dir = tempfile.mkdtemp()
-
-
-
-
- tf.get_logger().setLevel('ERROR') # probably not good practice
- output_path = os.path.join(temp_dir, 'temp.onnx')
-
-
- model.save(temp_dir)
-
- # # Convert the model
- try:
- modelstringtest="python -m tf2onnx.convert --saved-model "+temp_dir+" --output "+output_path+" --opset 13"
- resultonnx=os.system(modelstringtest)
- resultonnx2=1
- if resultonnx==0:
- pass
- else:
- raise Exception('Model conversion to onnx unsuccessful. Please try different model or submit predictions to leaderboard without submitting preprocessor or model files.')
- except:
- converter = tf.lite.TFLiteConverter.from_saved_model(temp_dir) # path to the SavedModel directory
- converter.target_spec.supported_ops = [
- tf.lite.OpsSet.TFLITE_BUILTINS, # enable TensorFlow Lite ops.
- tf.lite.OpsSet.SELECT_TF_OPS # enable TensorFlow ops.
- ]
- tflite_model = converter.convert()
-
- # Save the model.
- with open(os.path.join(temp_dir,'tempmodel.tflite'), 'wb') as f:
- f.write(tflite_model)
-
- modelstringtest="python -m tf2onnx.convert --tflite "+os.path.join(temp_dir,'tempmodel.tflite')+" --output "+output_path+" --opset 13"
- resultonnx2=os.system(modelstringtest)
- pass
-
- if any([resultonnx==0, resultonnx2==0]):
- pass
- else:
- return print("Model conversion to onnx unsuccessful. Please try different model or submit\npredictions to leaderboard without submitting preprocessor or model files.")
-
- onx = onnx.load(output_path)
-
-
- # generate metadata dict
- metadata = {}
-
- # placeholders, need to be generated elsewhere
- metadata['model_id'] = None
- metadata['data_id'] = None
- metadata['preprocessor_id'] = None
-
- # infer ml framework from function call
- metadata['ml_framework'] = 'keras'

- # get model type from model object
- metadata['model_type'] = str(model.__class__.__name__)
-
- # get transfer learning bool from user input
- metadata['transfer_learning'] = transfer_learning
-
- # get deep learning bool from user input
- metadata['deep_learning'] = deep_learning
-
- # get task type from user input
- metadata['task_type'] = task_type
-
- # placeholders, need to be inferred from data
- metadata['target_distribution'] = None
- metadata['input_type'] = None
- metadata['input_shape'] = None
- metadata['input_dtypes'] = None
- metadata['input_distribution'] = None
+ # Input signature
+ input_shape = model.input_shape
+ if isinstance(input_shape, list):
+ input_shape = input_shape[0]
+ input_signature = [tf.TensorSpec(input_shape, tf.float32, name="input")]
+
+ # Wrap model in tf.function
+ @tf.function(input_signature=input_signature)
+ def model_fn(x):
+ return model(x)
+
+ concrete_func = model_fn
+
+ # Convert to ONNX
+ with suppress_stderr():
+ onx_model, _ = tf2onnx.convert.from_function(
+ concrete_func,
+ input_signature=input_signature,
+ opset=13,
+ output_path=None
+ )

- # get model config dict from keras model object
- metadata['model_config'] = str(model.get_config())
+ # Extract metadata
+ metadata = {
+ 'model_id': None,
+ 'data_id': None,
+ 'preprocessor_id': None,
+ 'ml_framework': 'keras',
+ 'model_type': model.__class__.__name__,
+ 'transfer_learning': transfer_learning,
+ 'deep_learning': deep_learning,
+ 'task_type': task_type,
+ 'target_distribution': None,
+ 'input_type': None,
+ 'input_shape': input_shape,
+ 'input_dtypes': None,
+ 'input_distribution': None,
+ 'model_config': str(model.get_config()),
+ 'model_state': None,
+ 'eval_metrics': None,
+ 'model_graph': "",
+ 'metadata_onnx': None,
+ 'epochs': epochs
+ }

- # get model weights from keras object
  model_size = asizeof.asizeof(model.get_weights())
  mem = psutil.virtual_memory()

- if model_size > mem.available:
-
- warnings.warn(f"Model size ({model_size/1e6} MB) exceeds available memory ({mem.available/1e6} MB). Skipping extraction of model weights.")
-
+ if model_size > mem.available:
+ warnings.warn(f"Model size ({model_size/1e6} MB) exceeds available memory.")
  metadata['model_weights'] = None
-
- else:
-
+ else:
  metadata['model_weights'] = pickle.dumps(model.get_weights())

- # get model state from pytorch model object
- metadata['model_state'] = None
-
- # get list of current layer types
- layer_list, activation_list = _get_layer_names()
+ # Extract architecture
+ from aimodelshare.model import keras_unpack, model_summary_keras

- # extract model architecture metadata
+ keras_layers = keras_unpack(model)
  layers = []
  layers_n_params = []
  layers_shapes = []
  activations = []

-
- keras_layers = keras_unpack(model)
-
- for i in keras_layers:
-
- # get layer names
- if i.__class__.__name__ in layer_list:
- layers.append(i.__class__.__name__)
- layers_n_params.append(i.count_params())
- layers_shapes.append(i.output_shape)
-
- # get activation names
- if i.__class__.__name__ in activation_list:
- activations.append(i.__class__.__name__.lower())
- if hasattr(i, 'activation') and i.activation.__name__ in activation_list:
- activations.append(i.activation.__name__)
-
- if hasattr(model, 'loss'):
- loss = model.loss.__class__.__name__
- else:
- loss = None
-
- if hasattr(model, 'optimizer'):
- optimizer = model.optimizer.__class__.__name__
- else:
- optimizer = None
-
- model_summary_pd = model_summary_keras(model)
-
- # insert data into model architecture dict
- model_architecture = {'layers_number': len(layers),
- 'layers_sequence': layers,
- 'layers_summary': {i:layers.count(i) for i in set(layers)},
- 'layers_n_params': layers_n_params,
- 'layers_shapes': layers_shapes,
- 'activations_sequence': activations,
- 'activations_summary': {i:activations.count(i) for i in set(activations)},
- 'loss':loss,
- 'optimizer': optimizer
- }
+ for layer in keras_layers:
+ layers.append(layer.__class__.__name__)
+ layers_n_params.append(layer.count_params())
+ layers_shapes.append(getattr(layer, 'output_shape', None))
+ if hasattr(layer, 'activation'):
+ act = getattr(layer.activation, '__name__', None)
+ if act: activations.append(act)
+
+ optimizer = getattr(model.optimizer, '__class__', None)
+ loss = getattr(model.loss, '__class__', None)
+
+ model_architecture = {
+ 'layers_number': len(layers),
+ 'layers_sequence': layers,
+ 'layers_summary': {i: layers.count(i) for i in set(layers)},
+ 'layers_n_params': layers_n_params,
+ 'layers_shapes': layers_shapes,
+ 'activations_sequence': activations,
+ 'activations_summary': {i: activations.count(i) for i in set(activations)},
+ 'loss': loss.__name__ if loss else None,
+ 'optimizer': optimizer.__name__ if optimizer else None
+ }

  metadata['model_architecture'] = str(model_architecture)
-
-
- metadata['model_summary'] = model_summary_pd.to_json()
-
+ metadata['model_summary'] = model_summary_keras(model).to_json()
  metadata['memory_size'] = model_size

- metadata['epochs'] = epochs
-
- # model graph
- #G = model_graph_keras(model)
- #metadata['model_graph'] = G.create_dot().decode('utf-8')
- metadata['model_graph'] = ""
- # placeholder, needs evaluation engine
- metadata['eval_metrics'] = None
-
- # add metadata from onnx object
- # metadata['metadata_onnx'] = str(_extract_onnx_metadata(onx, framework='keras'))
- metadata['metadata_onnx'] = None
- # add metadata dict to onnx object
-
- meta = onx.metadata_props.add()
+ # Embed metadata in ONNX
+ meta = onx_model.metadata_props.add()
  meta.key = 'model_metadata'
  meta.value = str(metadata)

- return onx
+ return onx_model
+


  def _pytorch_to_onnx(model, model_input, transfer_learning=None,
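
Context for the aimsonnx.py hunk above: 0.1.31 drops the subprocess call to the tf2onnx CLI (and its TFLite fallback) in favor of an in-process tf2onnx.convert.from_function call. The following is a minimal sketch of that conversion pattern on a toy model; it is not the package's own code, the model and metadata values are placeholders, and it assumes a mutually compatible tensorflow/tf2onnx install.

```python
import tensorflow as tf
import tf2onnx

# Toy stand-in for a user-supplied Keras model
model = tf.keras.Sequential([
    tf.keras.Input(shape=(4,)),
    tf.keras.layers.Dense(8, activation="relu"),
    tf.keras.layers.Dense(1),
])

# Build an input signature from the model's input shape, as the new code does
input_shape = model.input_shape
if isinstance(input_shape, list):   # multi-input models report a list of shapes
    input_shape = input_shape[0]
input_signature = [tf.TensorSpec(input_shape, tf.float32, name="input")]

@tf.function(input_signature=input_signature)
def model_fn(x):
    return model(x)

# from_function returns (onnx.ModelProto, external_tensor_storage)
onnx_model, _ = tf2onnx.convert.from_function(
    model_fn, input_signature=input_signature, opset=13, output_path=None
)

# Metadata can then be attached to the ModelProto as key/value string pairs
meta = onnx_model.metadata_props.add()
meta.key = "model_metadata"
meta.value = str({"ml_framework": "keras"})
```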
aimodelshare/data_sharing/download_data.py
@@ -76,110 +76,87 @@ def download_layer(layer, layer_count, tmp_img_dir, blobs_resp):
  return layer_id, layer_dir

  def pull_image(image_uri):
- import os
- import requests
- import tempfile
- import json
- import shutil
- import tarfile
- from aimodelshare.data_sharing.utils import redo_with_write
-
- image_uri_parts = image_uri.split('/')
-
- registry = image_uri_parts[0]
- image, tag = image_uri_parts[2].split(':')
- repository = '/'.join([image_uri_parts[1], image])
-
- auth_url = get_auth_url(registry)
-
- # Request manifest with correct Accept header
- auth_head = get_auth_head(auth_url, registry, repository)
- manifest_url = f'https://{registry}/v2/{repository}/manifests/{tag}'
- resp = requests.get(manifest_url, headers=auth_head, verify=False)
-
- # --- PATCH: Handle manifest list (multi-platform images) ---
- if resp.headers.get('Content-Type') == 'application/vnd.docker.distribution.manifest.list.v2+json':
- manifest_list = resp.json()
-
- # Find the first linux/amd64 image (or fallback to first available)
- target_manifest = next(
- (m for m in manifest_list['manifests']
- if m['platform'].get('architecture') == 'amd64' and m['platform'].get('os') == 'linux'),
- manifest_list['manifests'][0]
- )
- digest = target_manifest['digest']
-
- # Get the actual image manifest now
- resp = requests.get(
- f'https://{registry}/v2/{repository}/manifests/{digest}',
- headers=auth_head,
- verify=False
- )
- # -----------------------------------------------------------
-
- manifest = resp.json()
-
- # Safely check and fail early if config key is still missing
- if 'config' not in manifest:
- raise ValueError("Manifest response missing 'config'. This image may not follow Docker V2 manifest schema.")
-
- config = manifest['config']['digest']
- config_resp = requests.get(f'https://{registry}/v2/{repository}/blobs/{config}', headers=auth_head, verify=False)
-
- tmp_img_dir = os.path.join(tempfile.gettempdir(), f'tmp_{image}_{tag}')
- os.mkdir(tmp_img_dir)
-
- with open(f'{tmp_img_dir}/{config[7:]}.json', 'wb') as file:
- file.write(config_resp.content)
-
- content = [{
- 'Config': config[7:] + '.json',
- 'RepoTags': [image_uri],
- 'Layers': []
- }]
-
- # Skip first 6 layers? Keep original logic for compatibility
- layers = manifest['layers'][6:]
- layer_count = 0
-
- for layer in layers:
- layer_count += 1
- auth_head = get_auth_head(auth_url, registry, repository)
- blobs_resp = requests.get(
- f'https://{registry}/v2/{repository}/blobs/{layer["digest"]}',
- headers=auth_head,
- stream=True,
- verify=False
- )
-
- layer_id, layer_dir = download_layer(layer, layer_count, tmp_img_dir, blobs_resp)
- content[0]['Layers'].append(layer_id + '/layer.tar')
-
- json_path = os.path.join(layer_dir, 'json')
- with open(json_path, 'w') as file:
- if layers[-1]['digest'] == layer['digest']:
- json_obj = json.loads(config_resp.content)
- json_obj.pop('history', None)
- json_obj.pop('rootfs', None)
- else:
- json_obj = {}
- json_obj['id'] = layer_id
- file.write(json.dumps(json_obj))
-
- with open(os.path.join(tmp_img_dir, 'manifest.json'), 'w') as f:
- f.write(json.dumps(content))
-
- repo_dict = {'/'.join(image_uri_parts[:-1]) + '/' + image: {tag: layer_id}}
- with open(os.path.join(tmp_img_dir, 'repositories'), 'w') as f:
- f.write(json.dumps(repo_dict))
-
- # Create tar archive from temp image directory
- docker_tar = os.path.join(tempfile.gettempdir(), f'{repository.replace("/", "_")}_{tag}.tar')
- with tarfile.open(docker_tar, "w") as tar:
- tar.add(tmp_img_dir, arcname=os.path.sep)
-
- shutil.rmtree(tmp_img_dir, onerror=redo_with_write)
- return docker_tar
+
+ image_uri_parts = image_uri.split('/')
+
+ registry = image_uri_parts[0]
+ image, tag = image_uri_parts[2].split(':')
+ repository = '/'.join([image_uri_parts[1], image])
+
+ auth_url = get_auth_url(registry)
+
+ auth_head = get_auth_head(auth_url, registry, repository)
+
+ resp = requests.get('https://{}/v2/{}/manifests/{}'.format(registry, repository, tag), headers=auth_head, verify=False)
+
+ config = resp.json()['config']['digest']
+ config_resp = requests.get('https://{}/v2/{}/blobs/{}'.format(registry, repository, config), headers=auth_head, verify=False)
+
+ tmp_img_dir = tempfile.gettempdir() + '/' + 'tmp_{}_{}'.format(image, tag)
+ os.mkdir(tmp_img_dir)
+
+ file = open('{}/{}.json'.format(tmp_img_dir, config[7:]), 'wb')
+ file.write(config_resp.content)
+ file.close()
+
+ content = [{
+ 'Config': config[7:] + '.json',
+ 'RepoTags': [],
+ 'Layers': []
+ }]
+ content[0]['RepoTags'].append(image_uri)
+
+ layer_count=0
+ layers = resp.json()['layers'][6:]
+
+ for layer in layers:
+
+ layer_count += 1
+
+ auth_head = get_auth_head(auth_url, registry, repository) # done to keep from expiring
+ blobs_resp = requests.get('https://{}/v2/{}/blobs/{}'.format(registry, repository, layer['digest']), headers=auth_head, stream=True, verify=False)
+
+ layer_id, layer_dir = download_layer(layer, layer_count, tmp_img_dir, blobs_resp)
+ content[0]['Layers'].append(layer_id + '/layer.tar')
+
+ # Creating json file
+ file = open(layer_dir + '/json', 'w')
+
+ # last layer = config manifest - history - rootfs
+ if layers[-1]['digest'] == layer['digest']:
+ json_obj = json.loads(config_resp.content)
+ del json_obj['history']
+ del json_obj['rootfs']
+ else: # other layers json are empty
+ json_obj = json.loads('{}')
+
+ json_obj['id'] = layer_id
+ file.write(json.dumps(json_obj))
+ file.close()
+
+ file = open(tmp_img_dir + '/manifest.json', 'w')
+ file.write(json.dumps(content))
+ file.close()
+
+ content = {
+ '/'.join(image_uri_parts[:-1]) + '/' + image : { tag : layer_id }
+ }
+
+ file = open(tmp_img_dir + '/repositories', 'w')
+ file.write(json.dumps(content))
+ file.close()
+
+ # Create image tar and clean tmp folder
+ docker_tar = tempfile.gettempdir() + '/' + '_'.join([repository.replace('/', '_'), tag]) + '.tar'
+ sys.stdout.flush()
+
+ tar = tarfile.open(docker_tar, "w")
+ tar.add(tmp_img_dir, arcname=os.path.sep)
+ tar.close()
+
+ shutil.rmtree(tmp_img_dir, onerror=redo_with_write)
+
+ return docker_tar


  def extract_data_from_image(image_name, file_name, location):
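
The pull_image hunk above reverts to the simpler flow that expects a single-image (schema 2) manifest with a top-level 'config' key, dropping the 0.1.29 handling of multi-platform manifest lists. Below is a rough, self-contained sketch of the underlying Docker Registry HTTP API v2 calls; it assumes a bearer token is already available (the role get_auth_head plays here), and fetch_manifest_and_config is an illustrative name, not a function in this package.

```python
import requests

def fetch_manifest_and_config(registry, repository, tag, token):
    headers = {
        "Authorization": f"Bearer {token}",
        # Without this Accept header many registries answer with a manifest *list*,
        # which has no top-level 'config' key and would break the flow above.
        "Accept": "application/vnd.docker.distribution.manifest.v2+json",
    }
    # Tag manifest: lists the config digest and the layer blobs
    manifest = requests.get(
        f"https://{registry}/v2/{repository}/manifests/{tag}",
        headers=headers, timeout=30,
    ).json()
    # Image config blob, addressed by its digest
    config_digest = manifest["config"]["digest"]
    config = requests.get(
        f"https://{registry}/v2/{repository}/blobs/{config_digest}",
        headers=headers, timeout=30,
    ).json()
    return manifest, config
```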
@@ -219,10 +196,11 @@ def import_quickstart_data(tutorial, section="modelplayground"):

  #Download Quick Start materials
  if all([tutorial == "flowers", section == "modelplayground"]):
- quickstart_repository = "public.ecr.aws/y2e2a1d6/quickstart_materials-repository:latest"
+ quickstart_repository = "public.ecr.aws/z5w0c9e9/quickstart_materials-repository:latest"
  existing_folder = 'flower_competition_data'
+
  if all([tutorial == "flowers", section == "competition"]):
- quickstart_repository = "public.ecr.aws/y2e2a1d6/quickstart_flowers_competition-repository:latest"
+ quickstart_repository = "public.ecr.aws/z5w0c9e9/quickstart_flowers_competition-repository:latest"
  existing_folder = 'flower_competition_data'

  if all([tutorial == "mnist", section == "modelplayground"]):
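
The only change to import_quickstart_data is the public ECR registry the quick-start images are pulled from (y2e2a1d6 → z5w0c9e9). A minimal usage sketch based on the tutorial and section values visible in this hunk, assuming the new repositories are reachable:

```python
from aimodelshare.data_sharing.download_data import import_quickstart_data

# Downloads the flower competition materials into 'flower_competition_data'
import_quickstart_data("flowers", section="competition")
```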
aimodelshare.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: aimodelshare
- Version: 0.1.29
+ Version: 0.1.31
  Summary: Deploy locally saved machine learning models to a live rest API and web-dashboard. Share it with the world via modelshare.org
  Home-page: https://www.modelshare.org
  Author: Michael Parrott
setup.py
@@ -6,7 +6,7 @@ with open("README.md", "r") as fh:

  setuptools.setup(
  name='aimodelshare', #TODO:update
- version='0.1.29', #TODO:update
+ version='0.1.31', #TODO:update
  author="Michael Parrott",
  author_email="mikedparrott@modelshare.org",
  description="Deploy locally saved machine learning models to a live rest API and web-dashboard. Share it with the world via modelshare.org",