deriva-ml 1.14.0__py3-none-any.whl → 1.14.27__py3-none-any.whl

This diff compares the contents of two package versions as published to one of the supported public registries. It is provided for informational purposes only and reflects the packages as they appear in that registry.
Files changed (49)
  1. deriva_ml/__init__.py +25 -30
  2. deriva_ml/core/__init__.py +39 -0
  3. deriva_ml/core/base.py +1489 -0
  4. deriva_ml/core/constants.py +36 -0
  5. deriva_ml/core/definitions.py +74 -0
  6. deriva_ml/core/enums.py +222 -0
  7. deriva_ml/core/ermrest.py +288 -0
  8. deriva_ml/core/exceptions.py +28 -0
  9. deriva_ml/core/filespec.py +116 -0
  10. deriva_ml/dataset/__init__.py +4 -0
  11. deriva_ml/{dataset_aux_classes.py → dataset/aux_classes.py} +16 -12
  12. deriva_ml/{dataset.py → dataset/dataset.py} +406 -428
  13. deriva_ml/{dataset_bag.py → dataset/dataset_bag.py} +137 -97
  14. deriva_ml/{history.py → dataset/history.py} +51 -33
  15. deriva_ml/{upload.py → dataset/upload.py} +48 -70
  16. deriva_ml/demo_catalog.py +233 -183
  17. deriva_ml/execution/environment.py +290 -0
  18. deriva_ml/{execution.py → execution/execution.py} +365 -252
  19. deriva_ml/execution/execution_configuration.py +163 -0
  20. deriva_ml/{execution_configuration.py → execution/workflow.py} +212 -224
  21. deriva_ml/feature.py +83 -46
  22. deriva_ml/model/__init__.py +0 -0
  23. deriva_ml/{deriva_model.py → model/catalog.py} +113 -132
  24. deriva_ml/{database_model.py → model/database.py} +52 -74
  25. deriva_ml/model/sql_mapper.py +44 -0
  26. deriva_ml/run_notebook.py +19 -11
  27. deriva_ml/schema/__init__.py +3 -0
  28. deriva_ml/{schema_setup → schema}/annotations.py +31 -22
  29. deriva_ml/schema/check_schema.py +104 -0
  30. deriva_ml/{schema_setup → schema}/create_schema.py +151 -104
  31. deriva_ml/schema/deriva-ml-reference.json +8525 -0
  32. deriva_ml/schema/table_comments_utils.py +57 -0
  33. {deriva_ml-1.14.0.dist-info → deriva_ml-1.14.27.dist-info}/METADATA +5 -4
  34. deriva_ml-1.14.27.dist-info/RECORD +40 -0
  35. {deriva_ml-1.14.0.dist-info → deriva_ml-1.14.27.dist-info}/entry_points.txt +1 -0
  36. deriva_ml/deriva_definitions.py +0 -391
  37. deriva_ml/deriva_ml_base.py +0 -1046
  38. deriva_ml/execution_environment.py +0 -139
  39. deriva_ml/schema_setup/table_comments_utils.py +0 -56
  40. deriva_ml/test-files/execution-parameters.json +0 -1
  41. deriva_ml/test-files/notebook-parameters.json +0 -5
  42. deriva_ml/test_functions.py +0 -141
  43. deriva_ml/test_notebook.ipynb +0 -197
  44. deriva_ml-1.14.0.dist-info/RECORD +0 -31
  45. /deriva_ml/{schema_setup → execution}/__init__.py +0 -0
  46. /deriva_ml/{schema_setup → schema}/policy.json +0 -0
  47. {deriva_ml-1.14.0.dist-info → deriva_ml-1.14.27.dist-info}/WHEEL +0 -0
  48. {deriva_ml-1.14.0.dist-info → deriva_ml-1.14.27.dist-info}/licenses/LICENSE +0 -0
  49. {deriva_ml-1.14.0.dist-info → deriva_ml-1.14.27.dist-info}/top_level.txt +0 -0
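Taken together, the listing shows 1.14.27 repackaging the flat 1.14.0 module layout into core/, dataset/, execution/, model/, and schema/ subpackages. A minimal sketch of how import paths shift, assuming the moved modules keep the exports of the files they replace (the class names are taken from the removed 1.14.0 test files below; whether the trimmed __init__.py in 1.14.27 still re-exports all of them cannot be confirmed from this listing alone):

    # Hypothetical before/after paths inferred from the rename list above.
    # 1.14.0 (flat layout):
    #   from deriva_ml.dataset_bag import DatasetBag
    #   from deriva_ml.execution_configuration import ExecutionConfiguration
    # 1.14.27 (subpackage layout), assuming each moved module keeps its exports:
    #   from deriva_ml.dataset.dataset_bag import DatasetBag
    #   from deriva_ml.execution.execution_configuration import ExecutionConfiguration

    # The top-level package remains the stable spelling used by the removed
    # test files in both versions:
    from deriva_ml import DerivaML, ExecutionConfiguration, DatasetSpec, MLVocab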
deriva_ml/execution_environment.py
@@ -1,139 +0,0 @@
- """ Module that captures details of execution environment for use as execution metadata"""
-
- import locale
- import os
- import platform
- import sys
-
- import site
- import importlib
-
-
- def get_execution_environment() -> dict:
-     return dict(
-         imports=get_loaded_modules(),
-         os=get_os_info(),
-         sys=get_sys_info(),
-         sys_path=sys.path,
-         site=get_site_info(),
-         platform=get_platform_info(),
-     )
-
-
- def get_loaded_modules():
-     return {
-         dist.metadata["Name"]: dist.version
-         for dist in importlib.metadata.distributions()
-     }
-
-
- def get_site_info():
-     return {
-         attr: getattr(site, attr)
-         for attr in ["PREFIXES", "ENABLE_USER_SITE", "USER_SITE", "USER_BASE"]
-     }
-
-
- def get_platform_info():
-     """
-     Returns all available attributes from the platform module.
-     """
-     attributes = [
-         attr
-         for attr in dir(platform)
-         if (not attr.startswith("_")) and callable(getattr(platform, attr))
-     ]
-     platform_info = {}
-     for attr in attributes:
-         try:
-             platform_info[attr] = getattr(platform, attr)()
-         except Exception:
-             # Not all attributes are available on all platforms.
-             continue
-     return platform_info
-
-
- def get_os_info():
-     values = {}
-     for func in [
-         "cwd",
-         "egid",
-         "euid",
-         "gid",
-         "groups",
-         "login",
-         "pgrp",
-         "uid",
-     ]:
-         try:
-             values[func] = getattr(os, "get" + func)()
-         except (OSError, AttributeError):
-             pass
-     values["umask"] = oct(get_umask())
-     values["name"] = os.name
-     values["environ"] = {e: v for e, v in os.environ.items()}
-     return values
-
-
- def get_umask():
-     # https://stackoverflow.com/questions/53227072/reading-umask-thread-safe
-     current_value = os.umask(0)
-     os.umask(current_value)
-     return current_value
-
-
- def get_sys_info():
-     values = {}
-     for attr in [
-         "argv",
-         "byteorder",
-         "exec_prefix",
-         "executable",
-         "flags",
-         "float_info",
-         # "maxint",
-         "maxsize",
-         "maxunicode",
-         # "meta_path",
-     ]:
-         values[attr] = getattr(sys, attr)
-     for func in [
-         "getdefaultencoding",
-         "getfilesystemencoding",
-         "getrecursionlimit",
-     ]:
-         try:
-             values[func] = getattr(sys, func)()
-         except (OSError, AttributeError) as exc:
-             values[func] = exc
-     return values
-
-
- def localeconv():
-     values = []
-     for key, value in sorted(locale.localeconv().items()):
-         if isinstance(value, bytes):
-             value = value.decode("ascii", errors="replace")
-         if key == "currency_symbol":
-             value = repr(value)
-         values.append("%s: %s" % (key, value))
-     return values
-
-
- def locale_module():
-     values = []
-     values.append("getdefaultlocale(): {}".format(locale.getdefaultlocale()))
-     for category in [
-         "LC_CTYPE",
-         "LC_COLLATE",
-         "LC_TIME",
-         "LC_MONETARY",
-         "LC_MESSAGES",
-         "LC_NUMERIC",
-     ]:
-         values.append(
-             "getlocale(locale.{}): {}".format(
-                 category, locale.getlocale(getattr(locale, category))
-             )
-         )
-     return values
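The removed module above rolled installed distributions plus interpreter, OS, and platform details into a single dict for use as execution metadata; per the file list, an expanded replacement ships as deriva_ml/execution/environment.py in 1.14.27. A self-contained sketch of the same idea, for illustration only (not the deriva-ml API):

    # Illustration: capture roughly the same environment snapshot the removed
    # module produced, as one dict that serializes cleanly to JSON.
    import importlib.metadata
    import json
    import os
    import platform
    import sys


    def capture_environment() -> dict:
        return {
            "imports": {d.metadata["Name"]: d.version for d in importlib.metadata.distributions()},
            "platform": {"system": platform.system(), "release": platform.release(),
                         "python": platform.python_version()},
            "os": {"name": os.name, "cwd": os.getcwd()},
            "sys": {"executable": sys.executable, "maxsize": sys.maxsize, "path": sys.path},
        }


    if __name__ == "__main__":
        # default=str guards against any value json cannot encode directly.
        print(json.dumps(capture_environment(), indent=2, default=str))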
deriva_ml/schema_setup/table_comments_utils.py
@@ -1,56 +0,0 @@
- import sys
- from deriva.core import ErmrestCatalog, get_credential
- import argparse
- import os
- from pathlib import Path
-
-
- def update_table_comments(model, schema_name: str, table_name: str, comments_dir: str) -> None:
-     table = model.schemas[schema_name].tables[table_name]
-     table_comments_dir = Path(comments_dir)/Path(f"{schema_name}/{table_name}")
-     for file in os.listdir(table_comments_dir):
-         file_path = os.path.join(table_comments_dir, file)
-         with open(file_path, "r") as f:
-             comment_str = f.read()
-         if file.split(".")[0] == table_name:
-             table.comment = comment_str
-         else:
-             table.columns[file.split(".")[0]].comment = comment_str
-
-
- def update_schema_comments(model, schema_name: str, comments_dir: str) -> None:
-     schema_comments_dir = Path(comments_dir)/Path(schema_name)
-     for table in os.listdir(schema_comments_dir):
-         if not table.endswith(".DS_Store"):
-             update_table_comments(model, schema_name, table, comments_dir)
-
-
- def main():
-     scheme = 'https'
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--hostname', type=str, required=True)
-     parser.add_argument('--schema_name', type=str, required=True)
-     parser.add_argument('--catalog_id', type=str, required=True)
-     parser.add_argument('--comments_dir', type=str, required=True,
-                         help="The directory containing the comments files for the whole catalog")
-     parser.add_argument('--table_name', type=str,
-                         help="Only update the comments for one table")
-     args = parser.parse_args()
-
-     credentials = get_credential(args.hostname)
-     catalog = ErmrestCatalog(scheme, args.hostname, args.catalog_id, credentials)
-     model = catalog.getCatalogModel()
-     if args.table_name:
-         update_table_comments(model, args.schema_name, args.table_name, args.comments_dir)
-         model.apply()
-     else:
-         update_schema_comments(model, args.schema_name, args.comments_dir)
-         model.apply()
-
-
- if __name__ == '__main__':
-     sys.exit(main())
-
-
-
- # docs/<schema-name>/<table-name>/[table|<column-name>.Md
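The comment-loading helpers above (rewritten as deriva_ml/schema/table_comments_utils.py in 1.14.27, per the file list) can also be driven from Python instead of the CLI entry point. A sketch against the 1.14.0 layout; the hostname, catalog id, and schema name below are placeholders:

    # Placeholder host/catalog/schema values; the function signatures come from
    # the removed 1.14.0 module shown above.
    from deriva.core import ErmrestCatalog, get_credential
    from deriva_ml.schema_setup.table_comments_utils import update_schema_comments

    hostname = "example.derivacloud.org"                           # placeholder host
    credentials = get_credential(hostname)
    catalog = ErmrestCatalog("https", hostname, "1", credentials)  # placeholder catalog id
    model = catalog.getCatalogModel()

    # Expects comment files laid out as docs/<schema-name>/<table-name>/<table or column>.md
    update_schema_comments(model, "deriva-ml", "docs")
    model.apply()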
deriva_ml/test-files/execution-parameters.json
@@ -1 +0,0 @@
- {"local-file": "My local file.txt"}
deriva_ml/test-files/notebook-parameters.json
@@ -1,5 +0,0 @@
- {
-   "assets": ["2-7J8M"],
-   "datasets": ["2-7K8W"],
-   "parameters": "test-files/execution-parameters.json"
- }
deriva_ml/test_functions.py
@@ -1,141 +0,0 @@
- host = "dev.eye-ai.org"
- catalog_id = "eye-ai"
-
- # source_dataset = '2-7K8W'
- source_dataset = "3R6"
- create_catalog = False
- from deriva_ml.demo_catalog import create_demo_catalog, DemoML
- from deriva_ml import (
-     Workflow,
-     ExecutionConfiguration,
-     MLVocab as vc,
-     DerivaML,
-     DatasetSpec,
- )
-
-
- def setup_demo_ml():
-     host = "dev.eye-ai.org"
-     test_catalog = create_demo_catalog(
-         host, "test-schema", create_features=True, create_datasets=True
-     )
-     ml_instance = DemoML(host, test_catalog.catalog_id)
-     return ml_instance
-
-
- def setup_dev():
-     host = "dev.eye-ai.org"
-     source_dataset = "2-277M"
-     ml_instance = DerivaML(host, catalog_id="eye-ai")
-     preds_workflow = Workflow(
-         name="LAC data template",
-         url="https://github.com/informatics-isi-edu/eye-ai-exec/blob/main/notebooks/templates/template_lac.ipynb",
-         workflow_type="Test Workflow",
-     )
-     config = ExecutionConfiguration(
-         datasets=[
-             {
-                 "rid": source_dataset,
-                 "materialize": False,
-                 "version": ml_instance.dataset_version(source_dataset),
-             }
-         ],
-         assets=["2-C8JM"],
-         workflow=preds_workflow,
-         description="Instance of linking VGG19 predictions to patient-level data",
-     )
-     return ml_instance, config
-
-     # Configuration instance.
-     config = ExecutionConfiguration(
-         datasets=huy_datasets,
-         # Materialize set to False if you only need the metadata from the bag, and not the assets.
-         assets=["2-4JR6"],
-         workflow=test_workflow,
-         description="Template instance of a feature creation workflow",
-     )
-     return config
-
-
- def create_demo_ml():
-     host = "dev.eye-ai.org"
-     test_catalog = create_demo_catalog(
-         host,
-         "test-schema",
-         create_features=True,
-         create_datasets=True,
-     )
-     return DemoML(host, test_catalog.catalog_id)
-
-
- def execution_test(ml_instance):
-     training_dataset_rid = [
-         ds["RID"]
-         for ds in ml_instance.find_datasets()
-         if "Training" in ds["Dataset_Type"]
-     ][0]
-     testing_dataset_rid = [
-         ds["RID"]
-         for ds in ml_instance.find_datasets()
-         if "Testing" in ds["Dataset_Type"]
-     ][0]
-
-     nested_dataset_rid = [
-         ds["RID"]
-         for ds in ml_instance.find_datasets()
-         if "Partitioned" in ds["Dataset_Type"]
-     ][0]
-
-     ml_instance.add_term(
-         vc.workflow_type, "Manual Workflow", description="Initial setup of Model File"
-     )
-     ml_instance.add_term(
-         vc.execution_asset_type, "API_Model", description="Model for our API workflow"
-     )
-     ml_instance.add_term(
-         vc.workflow_type, "ML Demo", description="A ML Workflow that uses Deriva ML API"
-     )
-
-     api_workflow = ml_instance.add_workflow(Workflow(
-         name="Manual Workflow",
-         url="https://github.com/informatics-isi-edu/deriva-ml/blob/main/docs/Notebooks/DerivaML%20Execution.ipynb",
-         workflow_type="Manual Workflow",
-         description="A manual operation",
-     ))
-
-     manual_execution = ml_instance.create_execution(
-         ExecutionConfiguration(description="Sample Execution", workflow=api_workflow)
-     )
-
-     # Now lets create model configuration for our program.
-     model_file = manual_execution.execution_asset_path("API_Model") / "modelfile.txt"
-     with open(model_file, "w") as fp:
-         fp.write("My model")
-
-     # Now upload the file and retrieve the RID of the new asset from the returned results.
-     uploaded_assets = manual_execution.upload_execution_outputs()
-
-     training_model_rid = uploaded_assets["API_Model/modelfile.txt"].result["RID"]
-     api_workflow = Workflow(
-         name="ML Demo",
-         url="https://github.com/informatics-isi-edu/deriva-ml/blob/main/pyproject.toml",
-         workflow_type="ML Demo",
-         description="A workflow that uses Deriva ML",
-     )
-
-     config = ExecutionConfiguration(
-         datasets=[
-             DatasetSpec(
-                 rid=nested_dataset_rid,
-                 version=ml_instance.dataset_version(nested_dataset_rid),
-             ),
-             DatasetSpec(
-                 rid=testing_dataset_rid,
-                 version=ml_instance.dataset_version(testing_dataset_rid),
-             ),
-         ],
-         assets=[training_model_rid],
-         description="Sample Execution",
-         workflow=api_workflow,
-     )
-     return config
deriva_ml/test_notebook.ipynb
@@ -1,197 +0,0 @@
- {
-   "cells": [
-     {
-       "cell_type": "code",
-       "id": "0",
-       "metadata": {
-         "ExecuteTime": {
-           "end_time": "2025-04-18T22:52:49.930351Z",
-           "start_time": "2025-04-18T22:52:48.926842Z"
-         }
-       },
-       "source": [
-         "import builtins\n",
-         "import os\n",
-         "\n",
-         "from deriva.core.utils.globus_auth_utils import GlobusNativeLogin\n",
-         "from deriva_ml import ExecutionConfiguration, MLVocab, DerivaML, DatasetSpec"
-       ],
-       "outputs": [],
-       "execution_count": 1
-     },
-     {
-       "cell_type": "code",
-       "id": "1",
-       "metadata": {
-         "tags": [
-           "parameters"
-         ],
-         "ExecuteTime": {
-           "end_time": "2025-04-18T22:52:49.988873Z",
-           "start_time": "2025-04-18T22:52:49.986713Z"
-         }
-       },
-       "source": [
-         "foo: int = 1\n",
-         "assets = []\n",
-         "datasets = []\n",
-         "parameters = None"
-       ],
-       "outputs": [],
-       "execution_count": 2
-     },
-     {
-       "metadata": {
-         "ExecuteTime": {
-           "end_time": "2025-04-18T22:52:50.002808Z",
-           "start_time": "2025-04-18T22:52:49.999450Z"
-         }
-       },
-       "cell_type": "code",
-       "source": [
-         "print(\"foo\", foo)\n",
-         "print(\"assets\", assets)\n",
-         "print(\"datasets\", datasets)\n",
-         "print(\"parameters\", parameters)"
-       ],
-       "id": "70b23cdd933ce669",
-       "outputs": [
-         {
-           "name": "stdout",
-           "output_type": "stream",
-           "text": [
-             "foo 1\n",
-             "assets []\n",
-             "datasets []\n",
-             "parameters None\n"
-           ]
-         }
-       ],
-       "execution_count": 3
-     },
-     {
-       "metadata": {
-         "ExecuteTime": {
-           "end_time": "2025-04-18T22:52:50.344660Z",
-           "start_time": "2025-04-18T22:52:50.013816Z"
-         }
-       },
-       "cell_type": "code",
-       "source": [
-         "hostname = os.environ.get(\"DERIVA_HOST\") #or \"dev.eye-ai.org\"\n",
-         "catalog_id = os.environ.get(\"DERIVA_CATALOG_ID\") #or 'eye-ai'\n",
-         "\n",
-         "gnl = GlobusNativeLogin(host=hostname)\n",
-         "if gnl.is_logged_in([hostname]):\n",
-         "    print(\"You are already logged in.\")\n",
-         "else:\n",
-         "    gnl.login([hostname], no_local_server=True, no_browser=True, refresh_tokens=True, update_bdbag_keychain=True)\n",
-         "    print(\"Login Successful\")\n"
-       ],
-       "id": "2",
-       "outputs": [
-         {
-           "ename": "AttributeError",
-           "evalue": "'NoneType' object has no attribute 'lower'",
-           "output_type": "error",
-           "traceback": [
- "\u001B[0;31m---------------------------------------------------------------------------\u001B[0m",
- "\u001B[0;31mAttributeError\u001B[0m Traceback (most recent call last)",
- "Cell \u001B[0;32mIn[4], line 5\u001B[0m\n\u001B[1;32m 2\u001B[0m catalog_id \u001B[38;5;241m=\u001B[39m os\u001B[38;5;241m.\u001B[39menviron\u001B[38;5;241m.\u001B[39mget(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mDERIVA_CATALOG_ID\u001B[39m\u001B[38;5;124m\"\u001B[39m) \u001B[38;5;66;03m#or 'eye-ai'\u001B[39;00m\n\u001B[1;32m 4\u001B[0m gnl \u001B[38;5;241m=\u001B[39m GlobusNativeLogin(host\u001B[38;5;241m=\u001B[39mhostname)\n\u001B[0;32m----> 5\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[43mgnl\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mis_logged_in\u001B[49m\u001B[43m(\u001B[49m\u001B[43m[\u001B[49m\u001B[43mhostname\u001B[49m\u001B[43m]\u001B[49m\u001B[43m)\u001B[49m:\n\u001B[1;32m 6\u001B[0m \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mYou are already logged in.\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n\u001B[1;32m 7\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n",
- "File \u001B[0;32m~/opt/anaconda3/envs/deriva-test/lib/python3.10/site-packages/deriva/core/utils/globus_auth_utils.py:582\u001B[0m, in \u001B[0;36mGlobusNativeLogin.is_logged_in\u001B[0;34m(self, hosts, requested_scopes, hosts_to_scope_map, exclude_defaults)\u001B[0m\n\u001B[1;32m 576\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;21mis_logged_in\u001B[39m(\u001B[38;5;28mself\u001B[39m,\n\u001B[1;32m 577\u001B[0m hosts\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mNone\u001B[39;00m,\n\u001B[1;32m 578\u001B[0m requested_scopes\u001B[38;5;241m=\u001B[39m(),\n\u001B[1;32m 579\u001B[0m hosts_to_scope_map\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mNone\u001B[39;00m,\n\u001B[1;32m 580\u001B[0m exclude_defaults\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mFalse\u001B[39;00m):\n\u001B[1;32m 581\u001B[0m scopes \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mset\u001B[39m(requested_scopes)\n\u001B[0;32m--> 582\u001B[0m scope_map \u001B[38;5;241m=\u001B[39m hosts_to_scope_map \u001B[38;5;28;01mif\u001B[39;00m hosts_to_scope_map \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mhosts_to_scope_map\u001B[49m\u001B[43m(\u001B[49m\u001B[43mhosts\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;129;43;01mor\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mhosts\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 583\u001B[0m scopes\u001B[38;5;241m.\u001B[39mupdate(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mscope_set_from_scope_map(scope_map))\n\u001B[1;32m 584\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m exclude_defaults:\n",
- "File \u001B[0;32m~/opt/anaconda3/envs/deriva-test/lib/python3.10/site-packages/deriva/core/utils/globus_auth_utils.py:607\u001B[0m, in \u001B[0;36mGlobusNativeLogin.hosts_to_scope_map\u001B[0;34m(self, hosts, match_scope_tag, all_tagged_scopes, force_refresh, warn_on_discovery_failure)\u001B[0m\n\u001B[1;32m 605\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m host \u001B[38;5;129;01min\u001B[39;00m hosts:\n\u001B[1;32m 606\u001B[0m scope_map\u001B[38;5;241m.\u001B[39mupdate({host: []})\n\u001B[0;32m--> 607\u001B[0m scopes \u001B[38;5;241m=\u001B[39m \u001B[43mget_oauth_scopes_for_host\u001B[49m\u001B[43m(\u001B[49m\u001B[43mhost\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 608\u001B[0m \u001B[43m \u001B[49m\u001B[43mconfig_file\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mconfig_file\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 609\u001B[0m \u001B[43m \u001B[49m\u001B[43mforce_refresh\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mforce_refresh\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 610\u001B[0m \u001B[43m \u001B[49m\u001B[43mwarn_on_discovery_failure\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mwarn_on_discovery_failure\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 611\u001B[0m scope_list \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mlist\u001B[39m()\n\u001B[1;32m 612\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m scopes:\n",
- "File \u001B[0;32m~/opt/anaconda3/envs/deriva-test/lib/python3.10/site-packages/deriva/core/utils/core_utils.py:300\u001B[0m, in \u001B[0;36mget_oauth_scopes_for_host\u001B[0;34m(host, config_file, force_refresh, warn_on_discovery_failure)\u001B[0m\n\u001B[1;32m 298\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m required_scopes:\n\u001B[1;32m 299\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m hostname, scopes \u001B[38;5;129;01min\u001B[39;00m required_scopes\u001B[38;5;241m.\u001B[39mitems():\n\u001B[0;32m--> 300\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[43mhost\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mlower\u001B[49m() \u001B[38;5;241m==\u001B[39m hostname\u001B[38;5;241m.\u001B[39mlower():\n\u001B[1;32m 301\u001B[0m result \u001B[38;5;241m=\u001B[39m scopes\n\u001B[1;32m 302\u001B[0m \u001B[38;5;28;01mbreak\u001B[39;00m\n",
-             "\u001B[0;31mAttributeError\u001B[0m: 'NoneType' object has no attribute 'lower'"
-           ]
-         }
-       ],
-       "execution_count": 4
-     },
-     {
-       "cell_type": "code",
-       "id": "3",
-       "metadata": {},
-       "source": [
-         "ml_instance = DerivaML(hostname, catalog_id)\n",
-         "\n",
-         "ml_instance.add_term(MLVocab.workflow_type, \"Manual Workflow\", description=\"Initial setup of Model File\")\n",
-         "ml_instance.add_term(MLVocab.asset_type, \"API_Model\", description=\"Model for our API workflow\")"
-       ],
-       "outputs": [],
-       "execution_count": null
-     },
-     {
-       "metadata": {},
-       "cell_type": "code",
-       "source": [
-         "api_workflow = ml_instance.create_workflow(\n",
-         "    name=\"Manual Workflow\",\n",
-         "    workflow_type=\"Manual Workflow\",\n",
-         "    description=\"A manual operation\"\n",
-         ")"
-       ],
-       "id": "5",
-       "outputs": [],
-       "execution_count": null
-     },
-     {
-       "cell_type": "code",
-       "id": "6",
-       "metadata": {},
-       "source": [
-         "manual_execution = ml_instance.create_execution(\n",
-         "    ExecutionConfiguration(\n",
-         "        description=\"Sample Execution\",\n",
-         "        workflow=api_workflow,\n",
-         "        datasets=[DatasetSpec(rid=ds, version=ml_instance.dataset_version(ds)) for ds in datasets],\n",
-         "        assets=assets,\n",
-         "        parameters=parameters\n",
-         "    )\n",
-         ")"
-       ],
-       "outputs": [],
-       "execution_count": null
-     },
-     {
-       "metadata": {},
-       "cell_type": "code",
-       "source": [
-         "print(f'parameters: {manual_execution.parameters}')\n",
-         "print(f'datasets: {manual_execution.datasets}')\n",
-         "print(f'assets: {manual_execution.asset_paths}')"
-       ],
-       "id": "4b2a3b8c16333645",
-       "outputs": [],
-       "execution_count": null
-     },
-     {
-       "metadata": {},
-       "cell_type": "code",
-       "source": "manual_execution.upload_execution_outputs()",
-       "id": "efa8cb1b0ed438bb",
-       "outputs": [],
-       "execution_count": null
-     }
-   ],
-   "metadata": {
-     "kernelspec": {
-       "display_name": "deriva-test",
-       "language": "python",
-       "name": "deriva-test"
-     },
-     "language_info": {
-       "codemirror_mode": {
-         "name": "ipython",
-         "version": 2
-       },
-       "file_extension": ".py",
-       "mimetype": "text/x-python",
-       "name": "python",
-       "nbconvert_exporter": "python",
-       "pygments_lexer": "ipython2",
-       "version": "2.7.6"
-     }
-   },
-   "nbformat": 4,
-   "nbformat_minor": 5
- }
deriva_ml-1.14.0.dist-info/RECORD
@@ -1,31 +0,0 @@
- deriva_ml/__init__.py,sha256=GfneBq7xDphMqUQY96sW9ixRj74M3UTUCmD4KMIRSaM,1101
- deriva_ml/database_model.py,sha256=D5vY0HqsCzWzbQVuK5jsImeEfjgg9wMe6fjDz8ucZqc,14695
- deriva_ml/dataset.py,sha256=NVyIRzy33QHnYvi5fZoxEIuUf-F2bHDCdIb6Zzchdpk,60649
- deriva_ml/dataset_aux_classes.py,sha256=IyPAbZUoVWjnm4hcpLet2mkJvKHRGN__IEwcAIrH-u4,6597
- deriva_ml/dataset_bag.py,sha256=yS8oYVshfFtRDyhGPRqtbvxjyd3ZFF29lrB783OP4vM,11849
- deriva_ml/demo_catalog.py,sha256=NBBsnF955jGpsGTgYFjeBn2580MjRuzC1MqqdrkaHAU,12081
- deriva_ml/deriva_definitions.py,sha256=LhreMYY_1a_MgpmmtG_ABVzK6psbfbiUcYf47KxNIGc,11309
- deriva_ml/deriva_ml_base.py,sha256=FQirlTQvOUs25G0WGxnlVj3uF3PQbMX96nNj9s9arTI,38565
- deriva_ml/deriva_model.py,sha256=yMuJ3C8-anl1qf7lFBkiswVMWcheI_TUV5G_GvlzVCk,13565
- deriva_ml/execution.py,sha256=qcxqkF0gxmPfwLQb-ooyagFawgO9sK4CQfzJOfBmMp4,38330
- deriva_ml/execution_configuration.py,sha256=7fiIbtzz9nmkxA9-GTiN6Ln2twfaOLivwJwGZb8gAL0,14163
- deriva_ml/execution_environment.py,sha256=bCRKrCELDbGQDo7_FKfw7e8iMzVjSRZK3baKkqH5-_0,3264
- deriva_ml/feature.py,sha256=07g0uSrhumdopJluWuWSRMrzagaikAOihqB09bzXBP4,5475
- deriva_ml/history.py,sha256=qP1fSAYUrMHdssT_0u2hzfgwb3X8-ZhI89q6xnVxTvI,2893
- deriva_ml/run_notebook.py,sha256=vhmij4P1Va52MIj8hOc-WmjLRp3sTmK6p7LXCWrzejc,6308
- deriva_ml/test_functions.py,sha256=-eqLHjjCQCLBNAr1ofbZekNiCOfMISSACRxT_YHER8I,4396
- deriva_ml/test_notebook.ipynb,sha256=_5D6rkSGbmENPJZbDgfZ6-yt94BNEwxytVUDmG3RE3w,10166
- deriva_ml/upload.py,sha256=VgCb0fUfm_siOW1p3g1MwenFIL8dpHzZHrVhzZGbHb4,16113
- deriva_ml/schema_setup/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- deriva_ml/schema_setup/annotations.py,sha256=0lGFa7PLwlFXXps26OYQm1vuWdsfCtbL2v2FqjyehC8,17739
- deriva_ml/schema_setup/create_schema.py,sha256=PTbUL00A-IwuMqADFEHvGTeRgU9UyBmQlw94EGseHhE,10305
- deriva_ml/schema_setup/policy.json,sha256=5ykB8nnZFl-oCHzlAwppCFKJHWJFIkYognUMVEanfY8,1826
- deriva_ml/schema_setup/table_comments_utils.py,sha256=-2_ubEpoH7ViLVb-ZfW9wZbQ26DTKNgjkCABMzGu4i4,2140
- deriva_ml/test-files/execution-parameters.json,sha256=1vBqXlaMa0cysonE20TweVDfTGRdSi9CUuAkW1xiYNo,36
- deriva_ml/test-files/notebook-parameters.json,sha256=7uEE2sLQSrSc9cEGQ_RKE7t5dwkEYv0qLo5mRbzo8Og,108
- deriva_ml-1.14.0.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- deriva_ml-1.14.0.dist-info/METADATA,sha256=DAIaEfvbsypLwhnNYZoYFnKi3jNln1je4n5KR4fiwvY,999
- deriva_ml-1.14.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- deriva_ml-1.14.0.dist-info/entry_points.txt,sha256=cJnALMa6pjdk6RQCt4HFbKHqALpVa0k6wPeQDPedLJI,295
- deriva_ml-1.14.0.dist-info/top_level.txt,sha256=I1Q1dkH96cRghdsFRVqwpa2M7IqJpR2QPUNNc5-Bnpw,10
- deriva_ml-1.14.0.dist-info/RECORD,,