datamint 2.0.2__tar.gz → 2.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of datamint has been flagged as possibly problematic.
See the linked security report for more details.
- {datamint-2.0.2 → datamint-2.1.1}/PKG-INFO +2 -2
- datamint-2.1.1/datamint/client_cmd_tools/datamint_config.py +436 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/dataset/base_dataset.py +25 -26
- {datamint-2.0.2 → datamint-2.1.1}/datamint/dataset/dataset.py +63 -38
- {datamint-2.0.2 → datamint-2.1.1}/pyproject.toml +2 -2
- datamint-2.0.2/datamint/client_cmd_tools/datamint_config.py +0 -180
- {datamint-2.0.2 → datamint-2.1.1}/README.md +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/__init__.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/api/__init__.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/api/base_api.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/api/client.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/api/dto/__init__.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/api/endpoints/__init__.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/api/endpoints/annotations_api.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/api/endpoints/channels_api.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/api/endpoints/datasetsinfo_api.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/api/endpoints/projects_api.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/api/endpoints/resources_api.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/api/endpoints/users_api.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/api/entity_base_api.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/apihandler/annotation_api_handler.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/apihandler/api_handler.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/apihandler/base_api_handler.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/apihandler/dto/__init__.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/apihandler/dto/annotation_dto.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/apihandler/exp_api_handler.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/apihandler/root_api_handler.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/client_cmd_tools/__init__.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/client_cmd_tools/datamint_upload.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/configs.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/dataset/__init__.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/dataset/annotation.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/entities/__init__.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/entities/annotation.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/entities/base_entity.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/entities/channel.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/entities/datasetinfo.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/entities/project.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/entities/resource.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/entities/user.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/examples/__init__.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/examples/example_projects.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/exceptions.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/experiment/__init__.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/experiment/_patcher.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/experiment/experiment.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/logging.yaml +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/utils/logging_utils.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/utils/torchmetrics.py +0 -0
- {datamint-2.0.2 → datamint-2.1.1}/datamint/utils/visualization.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: datamint
|
|
3
|
-
Version: 2.
|
|
3
|
+
Version: 2.1.1
|
|
4
4
|
Summary: A library for interacting with the Datamint API, designed for efficient data management, processing and Deep Learning workflows.
|
|
5
5
|
Requires-Python: >=3.10
|
|
6
6
|
Classifier: Programming Language :: Python :: 3
|
|
@@ -21,7 +21,7 @@ Requires-Dist: humanize (>=4.0.0,<5.0.0)
|
|
|
21
21
|
Requires-Dist: lazy-loader (>=0.3.0)
|
|
22
22
|
Requires-Dist: lightning
|
|
23
23
|
Requires-Dist: matplotlib
|
|
24
|
-
Requires-Dist: medimgkit (>=0.6.
|
|
24
|
+
Requires-Dist: medimgkit (>=0.6.3)
|
|
25
25
|
Requires-Dist: nest-asyncio (>=1.0.0,<2.0.0)
|
|
26
26
|
Requires-Dist: nibabel (>=4.0.0)
|
|
27
27
|
Requires-Dist: numpy
|
|
@@ -0,0 +1,436 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
import logging
|
|
3
|
+
from datamint import configs
|
|
4
|
+
from datamint.utils.logging_utils import load_cmdline_logging_config, ConsoleWrapperHandler
|
|
5
|
+
from rich.prompt import Prompt, Confirm
|
|
6
|
+
from rich.console import Console
|
|
7
|
+
import os
|
|
8
|
+
import shutil
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from rich.table import Table
|
|
11
|
+
|
|
12
|
+
# Module-wide loggers: _LOGGER for internal diagnostics, _USER_LOGGER for
# user-facing output (its handler carries the rich Console used below).
_LOGGER = logging.getLogger(__name__)
_USER_LOGGER = logging.getLogger('user_logger')
# Populated in main() from the user logger's ConsoleWrapperHandler.
console: Console
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def configure_default_url():
    """Interactively prompt for and store the default API URL.

    Aborts silently on an empty answer; rejects anything that is not an
    http(s) URL before persisting the value.
    """
    existing = configs.get_value(configs.APIURL_KEY, 'Not set')
    console.print(f"Current default URL: [key]{existing}[/key]")

    answer = Prompt.ask("Enter the default API URL (leave empty to abort)", console=console).strip()
    if not answer:
        return

    # Only accept plain http(s) URLs.
    if not answer.startswith(('http://', 'https://')):
        console.print("[warning]⚠️ URL should start with http:// or https://[/warning]")
        return

    configs.set_value(configs.APIURL_KEY, answer)
    console.print("[success]✅ Default API URL set successfully.[/success]")
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def ask_api_key(ask_to_save: bool) -> str | None:
    """Prompt the user for an API key, optionally offering to persist it.

    Args:
        ask_to_save: When True, ask whether to store the key in the config.

    Returns:
        The entered key, or None if the user aborted with an empty answer.
    """
    console.print("[info]💡 Get your API key from your Datamint administrator or the web app (https://app.datamint.io/team)[/info]")

    entered = Prompt.ask('API key (leave empty to abort)', console=console).strip()
    if not entered:
        return None

    if ask_to_save:
        save_it = Confirm.ask("Save the API key so it automatically loads next time? (y/n): ",
                              default=True, console=console)
        try:
            if save_it:
                configs.set_value(configs.APIKEY_KEY, entered)
                console.print("[success]✅ API key saved.[/success]")
        except Exception as e:
            # Saving is best-effort: still return the key to the caller.
            console.print("[error]❌ Error saving API key.[/error]")
            _LOGGER.exception(e)
    return entered
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def show_all_configurations():
    """Print every stored configuration entry, masking the API key."""
    config = configs.read_config()
    if not config:
        console.print("[dim]No configurations found.[/dim]")
        return

    console.print("[title]📋 Current configurations:[/title]")
    for key, value in config.items():
        if key == configs.APIKEY_KEY and value:
            # Never echo the full API key; show a short prefix/suffix only.
            shown = value if len(value) <= 6 else f"{value[:3]}...{value[-3:]}"
            console.print(f" [key]{key}[/key]: [dim]{shown}[/dim]")
        else:
            console.print(f" [key]{key}[/key]: {value}")
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def clear_all_configurations():
    """Ask for confirmation, then wipe every stored configuration value."""
    if Confirm.ask('Are you sure you want to clear all configurations?',
                   default=True, console=console):
        configs.clear_all_configurations()
        console.print("[success]✅ All configurations cleared.[/success]")
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def configure_api_key():
    """Prompt for an API key and persist it in the configuration."""
    key = ask_api_key(ask_to_save=False)
    if key is None:
        # User aborted with an empty answer.
        return
    configs.set_value(configs.APIKEY_KEY, key)
    console.print("[success]✅ API key saved.[/success]")
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def test_connection():
    """Instantiate the API client to verify that current settings work."""
    try:
        # Imported lazily: the full client may not be installed.
        from datamint import Api
        console.print("[accent]🔄 Testing connection...[/accent]")
        Api(check_connection=True)
        console.print("[success]✅ Connection successful![/success]")
    except ImportError:
        console.print("[error]❌ Full API not available. Install with: pip install datamint[/error]")
    except Exception as e:
        console.print(f"[error]❌ Connection failed: {e}[/error]")
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def discover_local_datasets() -> list[dict]:
    """Discover locally downloaded datasets.

    Scans the default datamint datasets directory under the user's home for
    subdirectories that contain a ``dataset.json`` marker file.

    Returns:
        A list sorted by dataset name; each entry has keys ``'name'`` (str),
        ``'path'`` (str), ``'size'`` (human-readable str) and
        ``'size_bytes'`` (int).
        Note: the previous annotation ``list[dict[str, str]]`` was wrong —
        ``'size_bytes'`` is an int.
    """
    from datamint.dataset.base_dataset import DatamintBaseDataset

    # Default location: ~/<DATAMINT_DEFAULT_DIR>/<DATAMINT_DATASETS_DIR>
    default_root = os.path.join(
        os.path.expanduser("~"),
        DatamintBaseDataset.DATAMINT_DEFAULT_DIR,
        DatamintBaseDataset.DATAMINT_DATASETS_DIR
    )

    datasets: list[dict] = []

    if not os.path.exists(default_root):
        return datasets

    for item in os.listdir(default_root):
        dataset_path = os.path.join(default_root, item)
        if not os.path.isdir(dataset_path):
            continue
        # A 'dataset.json' file marks a directory as a datamint dataset.
        if not os.path.exists(os.path.join(dataset_path, 'dataset.json')):
            continue
        # Total on-disk size of every file under the dataset directory.
        total_size = sum(
            os.path.getsize(os.path.join(dirpath, filename))
            for dirpath, _dirnames, filenames in os.walk(dataset_path)
            for filename in filenames
        )
        datasets.append({
            'name': item,
            'path': dataset_path,
            'size': _format_size(total_size),
            'size_bytes': total_size,
        })

    return sorted(datasets, key=lambda d: d['name'])
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
def _format_size(size_bytes: int) -> str:
|
|
146
|
+
"""Format size in bytes to human readable format."""
|
|
147
|
+
if size_bytes == 0:
|
|
148
|
+
return "0 B"
|
|
149
|
+
|
|
150
|
+
size_names = ["B", "KB", "MB", "GB", "TB"]
|
|
151
|
+
i = 0
|
|
152
|
+
while size_bytes >= 1024 and i < len(size_names) - 1:
|
|
153
|
+
size_bytes /= 1024.0
|
|
154
|
+
i += 1
|
|
155
|
+
|
|
156
|
+
return f"{size_bytes:.1f} {size_names[i]}"
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
def show_local_datasets() -> list[dict[str, str]]:
    """Render a table of locally downloaded datasets and return them."""
    datasets = discover_local_datasets()

    if not datasets:
        console.print("[dim]No local datasets found.[/dim]")
        return datasets

    console.print("[title]📁 Local Datasets:[/title]")

    table = Table(show_header=True, header_style="bold blue")
    table.add_column("Dataset Name", style="cyan")
    table.add_column("Size", justify="right", style="green")
    table.add_column("Path", style="dim")

    grand_total = 0
    for entry in datasets:
        table.add_row(entry['name'], entry['size'], entry['path'])
        grand_total += entry['size_bytes']

    console.print(table)
    console.print(f"\n[bold]Total size:[/bold] {_format_size(grand_total)}")

    return datasets
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
def clean_dataset(dataset_name: str) -> bool:
    """Delete a locally downloaded dataset after user confirmation.

    Args:
        dataset_name: Name of the dataset to clean

    Returns:
        True if dataset was cleaned, False otherwise
    """
    target = next((d for d in discover_local_datasets() if d['name'] == dataset_name), None)

    if target is None:
        console.print(f"[error]❌ Dataset '{dataset_name}' not found locally.[/error]")
        return False

    console.print(f"[warning]⚠️ About to delete dataset: {dataset_name}[/warning]")
    console.print(f"[dim]Path: {target['path']}[/dim]")
    console.print(f"[dim]Size: {target['size']}[/dim]")

    if not Confirm.ask("Are you sure you want to delete this dataset?",
                       default=False, console=console):
        console.print("[dim]Operation cancelled.[/dim]")
        return False

    try:
        shutil.rmtree(target['path'])
    except Exception as e:
        console.print(f"[error]❌ Error deleting dataset: {e}[/error]")
        _LOGGER.exception(e)
        return False

    console.print(f"[success]✅ Dataset '{dataset_name}' has been deleted.[/success]")
    return True
|
|
225
|
+
|
|
226
|
+
|
|
227
|
+
def clean_all_datasets() -> bool:
    """Delete every locally downloaded dataset after one confirmation.

    Returns:
        True if datasets were cleaned, False otherwise
    """
    datasets = discover_local_datasets()

    if not datasets:
        console.print("[dim]No local datasets found to clean.[/dim]")
        return True

    console.print(f"[warning]⚠️ About to delete {len(datasets)} dataset(s):[/warning]")

    table = Table(show_header=True, header_style="bold red")
    table.add_column("Dataset Name", style="cyan")
    table.add_column("Size", justify="right", style="green")

    grand_total = 0
    for entry in datasets:
        table.add_row(entry['name'], entry['size'])
        grand_total += entry['size_bytes']

    console.print(table)
    console.print(f"\n[bold red]Total size to be deleted:[/bold red] {_format_size(grand_total)}")

    if not Confirm.ask("Are you sure you want to delete ALL local datasets? (this does not affect remote datasets)",
                       default=False, console=console):
        console.print("[dim]Operation cancelled.[/dim]")
        return False

    deleted = 0
    for entry in datasets:
        try:
            shutil.rmtree(entry['path'])
        except Exception as e:
            console.print(f"[error]❌ Failed to delete {entry['name']}: {e}[/error]")
            _LOGGER.exception(e)
        else:
            console.print(f"[success]✅ Deleted: {entry['name']}[/success]")
            deleted += 1

    if deleted == len(datasets):
        console.print(f"[success]✅ Successfully deleted all {deleted} datasets.[/success]")
        return True
    console.print(f"[warning]⚠️ Deleted {deleted} out of {len(datasets)} datasets.[/warning]")
    return False
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
def interactive_dataset_cleaning() -> None:
    """Show local datasets and walk the user through cleaning choices."""
    datasets = show_local_datasets()

    if not datasets:
        return

    console.print("\n[title]🧹 Dataset Cleaning Options:[/title]")
    console.print(" [accent](1)[/accent] Clean a specific dataset")
    console.print(" [accent](2)[/accent] Clean all datasets")
    console.print(" [accent](b)[/accent] Back to main menu")

    # Answers that mean "go back": empty input, a raw ESC byte, or its names.
    back_inputs = ('', '\x1b', 'esc', 'escape')

    try:
        choice = Prompt.ask("Enter your choice", console=console).lower().strip()

        if choice in back_inputs:
            return

        if choice == '1':
            names = [d['name'] for d in datasets]
            console.print("\n[title]Available datasets:[/title]")
            for pos, name in enumerate(names, 1):
                console.print(f" [accent]({pos})[/accent] {name}")

            selection = Prompt.ask("Enter dataset number or name", console=console).strip()

            if selection in back_inputs:
                return

            # A numeric answer selects by position in the printed list.
            try:
                idx = int(selection) - 1
            except ValueError:
                pass
            else:
                if 0 <= idx < len(names):
                    clean_dataset(names[idx])
                    return

            # Fall back to matching by dataset name.
            if selection in names:
                clean_dataset(selection)
            else:
                console.print("[error]❌ Invalid dataset selection.[/error]")
        elif choice == '2':
            clean_all_datasets()
        elif choice != 'b':
            console.print("[error]❌ Invalid choice.[/error]")
    except KeyboardInterrupt:
        pass
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
def interactive_mode():
    """Run the interactive configuration menu loop.

    Offers API-key/URL setup, configuration inspection, connection testing
    and local dataset management until the user quits (or hits Ctrl-C).
    """
    console.print("[title]🔧 Datamint Configuration Tool[/title]")

    try:
        # First run (empty config): jump straight into API-key setup.
        if len(configs.read_config()) == 0:
            console.print("[warning]👋 Welcome! Let's set up your API key first.[/warning]")
            configure_api_key()

        while True:
            console.print("\n[title]📋 Select the action you want to perform:[/title]")
            console.print(" [accent](1)[/accent] Configure the API key")
            console.print(" [accent](2)[/accent] Configure the default URL")
            console.print(" [accent](3)[/accent] Show all configuration settings")
            console.print(" [accent](4)[/accent] Clear all configuration settings")
            console.print(" [accent](5)[/accent] Test connection")
            console.print(" [accent](6)[/accent] Manage/Show local datasets...")
            console.print(" [accent](q)[/accent] Exit")
            choice = Prompt.ask("Enter your choice", console=console).lower().strip()

            if choice == '1':
                configure_api_key()
            elif choice == '2':
                configure_default_url()
            elif choice == '3':
                show_all_configurations()
            elif choice == '4':
                clear_all_configurations()
            elif choice == '5':
                test_connection()
            elif choice == '6':
                interactive_dataset_cleaning()
            elif choice in ('q', 'exit', 'quit'):
                break
            else:
                # Fix: the menu has options 1-6 (plus 'q'); the old message
                # incorrectly said "between 1 and 7".
                console.print("[error]❌ Invalid choice. Please enter a number between 1 and 6 or 'q' to quit.[/error]")
    except KeyboardInterrupt:
        # Move to a fresh line so the goodbye message is not glued to ^C.
        console.print('')

    console.print("[success]👋 Goodbye![/success]")
|
|
372
|
+
|
|
373
|
+
|
|
374
|
+
def main():
    """Entry point for the ``datamint-config`` command-line tool.

    Parses command-line options, applies any non-interactive actions
    (set key/URL, list/clean datasets), and falls back to the interactive
    menu when no action was requested or ``-i/--interactive`` was given.

    Raises:
        RuntimeError: If the logging configuration did not install a
            ConsoleWrapperHandler on 'user_logger' (previously this
            surfaced as an opaque IndexError).
    """
    global console
    load_cmdline_logging_config()
    # The rich Console is owned by the user logger's handler; fail with a
    # clear message instead of a bare [0] IndexError if it is missing.
    wrapper = next((h for h in _USER_LOGGER.handlers if isinstance(h, ConsoleWrapperHandler)), None)
    if wrapper is None:
        raise RuntimeError("No ConsoleWrapperHandler configured on 'user_logger'; "
                           "check the logging configuration.")
    console = wrapper.console
    parser = argparse.ArgumentParser(
        description='🔧 Datamint API Configuration Tool',
        epilog="""
Examples:
  datamint-config                       # Interactive mode
  datamint-config --api-key YOUR_KEY    # Set API key
  datamint-config --list-datasets       # Show local datasets
  datamint-config --clean-dataset NAME  # Clean specific dataset
  datamint-config --clean-all-datasets  # Clean all datasets

More Documentation: https://sonanceai.github.io/datamint-python-api/command_line_tools.html
""",
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('--api-key', type=str, help='API key to set')
    parser.add_argument('--default-url', '--url', type=str, help='Default URL to set')
    parser.add_argument('-i', '--interactive', action='store_true',
                        help='Interactive mode (default if no other arguments provided)')
    parser.add_argument('--list-datasets', action='store_true',
                        help='List all locally downloaded datasets')
    parser.add_argument('--clean-dataset', type=str, metavar='DATASET_NAME',
                        help='Clean a specific dataset by name')
    parser.add_argument('--clean-all-datasets', action='store_true',
                        help='Clean all locally downloaded datasets')

    args = parser.parse_args()

    if args.api_key is not None:
        configs.set_value(configs.APIKEY_KEY, args.api_key)
        console.print("[success]✅ API key saved.[/success]")

    if args.default_url is not None:
        # Basic URL validation
        if not args.default_url.startswith(('http://', 'https://')):
            console.print("[error]❌ URL must start with http:// or https://[/error]")
            return
        configs.set_value(configs.APIURL_KEY, args.default_url)
        console.print("[success]✅ Default URL saved.[/success]")

    if args.list_datasets:
        show_local_datasets()

    if args.clean_dataset:
        clean_dataset(args.clean_dataset)

    if args.clean_all_datasets:
        clean_all_datasets()

    no_arguments_provided = (args.api_key is None and args.default_url is None and
                             not args.list_datasets and not args.clean_dataset and
                             not args.clean_all_datasets)

    if no_arguments_provided or args.interactive:
        interactive_mode()
|
|
433
|
+
|
|
434
|
+
|
|
435
|
+
# Allow running this module directly as a script (normally invoked via the
# 'datamint-config' console entry point).
if __name__ == "__main__":
    main()
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
import os
|
|
2
2
|
import requests
|
|
3
3
|
from tqdm.auto import tqdm
|
|
4
|
-
from typing import Optional, Callable, Any, Literal
|
|
4
|
+
from typing import Optional, Callable, Any, Literal, Sequence
|
|
5
5
|
import logging
|
|
6
6
|
import shutil
|
|
7
7
|
import json
|
|
@@ -66,8 +66,8 @@ class DatamintBaseDataset:
|
|
|
66
66
|
project_name: str,
|
|
67
67
|
root: str | None = None,
|
|
68
68
|
auto_update: bool = True,
|
|
69
|
-
api_key:
|
|
70
|
-
server_url:
|
|
69
|
+
api_key: str | None = None,
|
|
70
|
+
server_url: str | None = None,
|
|
71
71
|
return_dicom: bool = False,
|
|
72
72
|
return_metainfo: bool = True,
|
|
73
73
|
return_annotations: bool = True,
|
|
@@ -75,14 +75,14 @@ class DatamintBaseDataset:
|
|
|
75
75
|
include_unannotated: bool = True,
|
|
76
76
|
all_annotations: bool = False,
|
|
77
77
|
# Filtering parameters
|
|
78
|
-
include_annotators:
|
|
79
|
-
exclude_annotators:
|
|
80
|
-
include_segmentation_names:
|
|
81
|
-
exclude_segmentation_names:
|
|
82
|
-
include_image_label_names:
|
|
83
|
-
exclude_image_label_names:
|
|
84
|
-
include_frame_label_names:
|
|
85
|
-
exclude_frame_label_names:
|
|
78
|
+
include_annotators: list[str] | None = None,
|
|
79
|
+
exclude_annotators: list[str] | None = None,
|
|
80
|
+
include_segmentation_names: list[str] | None = None,
|
|
81
|
+
exclude_segmentation_names: list[str] | None = None,
|
|
82
|
+
include_image_label_names: list[str] | None = None,
|
|
83
|
+
exclude_image_label_names: list[str] | None = None,
|
|
84
|
+
include_frame_label_names: list[str] | None = None,
|
|
85
|
+
exclude_frame_label_names: list[str] | None = None,
|
|
86
86
|
):
|
|
87
87
|
self._validate_inputs(project_name, include_annotators, exclude_annotators,
|
|
88
88
|
include_segmentation_names, exclude_segmentation_names,
|
|
@@ -106,14 +106,14 @@ class DatamintBaseDataset:
|
|
|
106
106
|
def _validate_inputs(
|
|
107
107
|
self,
|
|
108
108
|
project_name: str,
|
|
109
|
-
include_annotators:
|
|
110
|
-
exclude_annotators:
|
|
111
|
-
include_segmentation_names:
|
|
112
|
-
exclude_segmentation_names:
|
|
113
|
-
include_image_label_names:
|
|
114
|
-
exclude_image_label_names:
|
|
115
|
-
include_frame_label_names:
|
|
116
|
-
exclude_frame_label_names:
|
|
109
|
+
include_annotators: Sequence[str] | None,
|
|
110
|
+
exclude_annotators: Sequence[str] | None,
|
|
111
|
+
include_segmentation_names: Sequence[str] | None,
|
|
112
|
+
exclude_segmentation_names: Sequence[str] | None,
|
|
113
|
+
include_image_label_names: Sequence[str] | None,
|
|
114
|
+
exclude_image_label_names: Sequence[str] | None,
|
|
115
|
+
include_frame_label_names: Sequence[str] | None,
|
|
116
|
+
exclude_frame_label_names: Sequence[str] | None,
|
|
117
117
|
) -> None:
|
|
118
118
|
"""Validate input parameters."""
|
|
119
119
|
if project_name is None:
|
|
@@ -360,11 +360,13 @@ class DatamintBaseDataset:
|
|
|
360
360
|
@property
|
|
361
361
|
def segmentation_labels_set(self) -> list[str]:
|
|
362
362
|
"""Returns the set of segmentation labels in the dataset."""
|
|
363
|
-
|
|
363
|
+
a = set(self.frame_lsets['segmentation'])
|
|
364
|
+
b = set(self.image_lsets['segmentation'])
|
|
365
|
+
return list(a.union(b))
|
|
364
366
|
|
|
365
367
|
def _get_annotations_internal(
|
|
366
368
|
self,
|
|
367
|
-
annotations:
|
|
369
|
+
annotations: Sequence[Annotation],
|
|
368
370
|
type: Literal['label', 'category', 'segmentation', 'all'] = 'all',
|
|
369
371
|
scope: Literal['frame', 'image', 'all'] = 'all'
|
|
370
372
|
) -> list[Annotation]:
|
|
@@ -441,10 +443,8 @@ class DatamintBaseDataset:
|
|
|
441
443
|
|
|
442
444
|
def get_resources_ids(self) -> list[str]:
|
|
443
445
|
"""Get list of resource IDs."""
|
|
444
|
-
return [
|
|
445
|
-
|
|
446
|
-
for i in self.subset_indices
|
|
447
|
-
]
|
|
446
|
+
return [self._get_image_metainfo(i, bypass_subset_indices=True)['metainfo']['id']
|
|
447
|
+
for i in self.subset_indices]
|
|
448
448
|
|
|
449
449
|
def _get_labels_set(self, framed: bool) -> tuple[dict, dict[str, dict[str, int]]]:
|
|
450
450
|
"""Returns the set of labels and mappings to integers.
|
|
@@ -992,7 +992,6 @@ class DatamintBaseDataset:
|
|
|
992
992
|
return Path(resource['file'])
|
|
993
993
|
else:
|
|
994
994
|
# ext = guess_extension(resource['mimetype'])
|
|
995
|
-
# _LOGGER.debug(f"Guessed extension for resource {resource['id']}|{resource['mimetype']}: {ext}")
|
|
996
995
|
# if ext is None:
|
|
997
996
|
# _LOGGER.warning(f"Could not guess extension for resource {resource['id']}.")
|
|
998
997
|
# ext = ''
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
from .base_dataset import DatamintBaseDataset
|
|
2
|
-
from typing import List, Optional, Callable, Any, Dict, Literal
|
|
2
|
+
from typing import List, Optional, Callable, Any, Dict, Literal, Sequence
|
|
3
3
|
import torch
|
|
4
4
|
from torch import Tensor
|
|
5
5
|
import os
|
|
@@ -8,6 +8,7 @@ import logging
|
|
|
8
8
|
from PIL import Image
|
|
9
9
|
import albumentations
|
|
10
10
|
from datamint.entities.annotation import Annotation
|
|
11
|
+
from medimgkit.readers import read_array_normalized
|
|
11
12
|
|
|
12
13
|
_LOGGER = logging.getLogger(__name__)
|
|
13
14
|
|
|
@@ -117,7 +118,9 @@ class DatamintDataset(DatamintBaseDataset):
|
|
|
117
118
|
if semantic_seg_merge_strategy is not None and not return_as_semantic_segmentation:
|
|
118
119
|
raise ValueError("semantic_seg_merge_strategy can only be used if return_as_semantic_segmentation is True")
|
|
119
120
|
|
|
120
|
-
def _load_segmentations(self,
|
|
121
|
+
def _load_segmentations(self,
|
|
122
|
+
annotations: list[Annotation],
|
|
123
|
+
img_shape) -> tuple[dict[str, list], dict[str, list]]:
|
|
121
124
|
"""
|
|
122
125
|
Load segmentations from annotations.
|
|
123
126
|
|
|
@@ -152,19 +155,27 @@ class DatamintDataset(DatamintBaseDataset):
|
|
|
152
155
|
|
|
153
156
|
segfilepath = ann.file # png file
|
|
154
157
|
segfilepath = os.path.join(self.dataset_dir, segfilepath)
|
|
155
|
-
|
|
156
|
-
seg
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
158
|
+
seg = read_array_normalized(segfilepath) # (frames, C, H, W)
|
|
159
|
+
if seg.shape[1] != 1:
|
|
160
|
+
raise ValueError(f"Segmentation file must have 1 channel, got {seg.shape} in {segfilepath}")
|
|
161
|
+
seg = seg[:, 0, :, :] # (frames, H, W)
|
|
162
|
+
|
|
163
|
+
# # FIXME: avoid enforcing resizing the mask
|
|
164
|
+
# seg = (Image.open(segfilepath)
|
|
165
|
+
# .convert('L')
|
|
166
|
+
# .resize((w, h), Image.Resampling.NEAREST)
|
|
167
|
+
# )
|
|
168
|
+
# seg = np.array(seg)
|
|
161
169
|
|
|
162
170
|
seg = torch.from_numpy(seg)
|
|
163
171
|
seg = seg == 255 # binary mask
|
|
164
172
|
# map the segmentation label to the code
|
|
165
|
-
seg_code = self.frame_lcodes['segmentation'][ann.name]
|
|
166
173
|
if self.return_frame_by_frame:
|
|
167
174
|
frame_index = 0
|
|
175
|
+
if seg.shape[0] != 1:
|
|
176
|
+
raise NotImplementedError(
|
|
177
|
+
"Volume segmentations are not supported yet when return_frame_by_frame is True")
|
|
178
|
+
seg = seg[0:1] # (#frames, H, W) -> (1, H, W)
|
|
168
179
|
else:
|
|
169
180
|
frame_index = ann.index
|
|
170
181
|
|
|
@@ -174,12 +185,25 @@ class DatamintDataset(DatamintBaseDataset):
|
|
|
174
185
|
author_segs = segmentations[author]
|
|
175
186
|
author_labels = seg_labels[author]
|
|
176
187
|
|
|
177
|
-
if
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
188
|
+
if frame_index is not None and ann.scope == 'frame':
|
|
189
|
+
seg_code = self.frame_lcodes['segmentation'][ann.name]
|
|
190
|
+
if author_segs[frame_index] is None:
|
|
191
|
+
author_segs[frame_index] = []
|
|
192
|
+
author_labels[frame_index] = []
|
|
193
|
+
s = seg[0] if seg.shape[0] == 1 else seg[frame_index]
|
|
194
|
+
author_segs[frame_index].append(s)
|
|
195
|
+
author_labels[frame_index].append(seg_code)
|
|
196
|
+
elif frame_index is None and ann.scope == 'image':
|
|
197
|
+
seg_code = self.image_lcodes['segmentation'][ann.name]
|
|
198
|
+
# apply to all frames
|
|
199
|
+
for i in range(nframes):
|
|
200
|
+
if author_segs[i] is None:
|
|
201
|
+
author_segs[i] = []
|
|
202
|
+
author_labels[i] = []
|
|
203
|
+
author_segs[i].append(seg[i])
|
|
204
|
+
author_labels[i].append(seg_code)
|
|
205
|
+
else:
|
|
206
|
+
raise ValueError(f"Invalid segmentation annotation: {ann}")
|
|
183
207
|
|
|
184
208
|
# convert to tensor
|
|
185
209
|
for author in segmentations.keys():
|
|
@@ -196,8 +220,8 @@ class DatamintDataset(DatamintBaseDataset):
|
|
|
196
220
|
return segmentations, seg_labels
|
|
197
221
|
|
|
198
222
|
def _instanceseg2semanticseg(self,
|
|
199
|
-
segmentations:
|
|
200
|
-
seg_labels:
|
|
223
|
+
segmentations: Sequence[Tensor],
|
|
224
|
+
seg_labels: Sequence[Tensor]) -> Tensor:
|
|
201
225
|
"""
|
|
202
226
|
Convert instance segmentation to semantic segmentation.
|
|
203
227
|
|
|
@@ -208,25 +232,26 @@ class DatamintDataset(DatamintBaseDataset):
|
|
|
208
232
|
Returns:
|
|
209
233
|
Tensor: tensor of shape (n, num_labels, H, W), where `n` is the number of frames.
|
|
210
234
|
"""
|
|
211
|
-
if segmentations is
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
235
|
+
if segmentations is None:
|
|
236
|
+
return None
|
|
237
|
+
|
|
238
|
+
if len(segmentations) != len(seg_labels):
|
|
239
|
+
raise ValueError("segmentations and seg_labels must have the same length")
|
|
240
|
+
|
|
241
|
+
h, w = segmentations[0].shape[1:]
|
|
242
|
+
new_shape = (len(segmentations),
|
|
243
|
+
len(self.segmentation_labels_set)+1, # +1 for background
|
|
244
|
+
h, w)
|
|
245
|
+
new_segmentations = torch.zeros(new_shape, dtype=torch.uint8)
|
|
246
|
+
# for each frame
|
|
247
|
+
for i in range(len(segmentations)):
|
|
248
|
+
# for each instance
|
|
249
|
+
for j in range(len(segmentations[i])):
|
|
250
|
+
new_segmentations[i, seg_labels[i][j]] += segmentations[i][j]
|
|
251
|
+
new_segmentations = new_segmentations > 0
|
|
252
|
+
# pixels that are not in any segmentation are labeled as background
|
|
253
|
+
new_segmentations[:, 0] = new_segmentations.sum(dim=1) == 0
|
|
254
|
+
return new_segmentations.float()
|
|
230
255
|
|
|
231
256
|
def apply_semantic_seg_merge_strategy(self, segmentations: dict[str, Tensor],
|
|
232
257
|
nframes: int,
|
|
@@ -338,7 +363,7 @@ class DatamintDataset(DatamintBaseDataset):
|
|
|
338
363
|
if isinstance(labels, Tensor):
|
|
339
364
|
# single tensor for the author
|
|
340
365
|
seg_names[author] = [code_to_name[code.item()-1] for code in labels]
|
|
341
|
-
elif isinstance(labels,
|
|
366
|
+
elif isinstance(labels, Sequence):
|
|
342
367
|
# list of frame tensors
|
|
343
368
|
seg_names[author] = [[code_to_name[code.item()-1] for code in frame_labels]
|
|
344
369
|
for frame_labels in labels]
|
|
@@ -477,7 +502,7 @@ class DatamintDataset(DatamintBaseDataset):
|
|
|
477
502
|
return new_item
|
|
478
503
|
|
|
479
504
|
def _convert_labels_annotations(self,
|
|
480
|
-
annotations:
|
|
505
|
+
annotations: Sequence[Annotation],
|
|
481
506
|
num_frames: int | None = None) -> dict[str, torch.Tensor]:
|
|
482
507
|
"""
|
|
483
508
|
Converts the annotations, of the same type and scope, to tensor of shape (num_frames, num_labels)
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
[project]
|
|
2
2
|
name = "datamint"
|
|
3
3
|
description = "A library for interacting with the Datamint API, designed for efficient data management, processing and Deep Learning workflows."
|
|
4
|
-
version = "2.
|
|
4
|
+
version = "2.1.1"
|
|
5
5
|
dynamic = ["dependencies"]
|
|
6
6
|
requires-python = ">=3.10"
|
|
7
7
|
readme = "README.md"
|
|
@@ -40,7 +40,7 @@ matplotlib = "*"
|
|
|
40
40
|
lightning = "*"
|
|
41
41
|
albumentations = ">=2.0.0"
|
|
42
42
|
lazy-loader = ">=0.3.0"
|
|
43
|
-
medimgkit = ">=0.6.
|
|
43
|
+
medimgkit = ">=0.6.3"
|
|
44
44
|
typing_extensions = ">=4.0.0"
|
|
45
45
|
pydantic = ">=2.6.4"
|
|
46
46
|
httpx = "*"
|
|
@@ -1,180 +0,0 @@
|
|
|
1
|
-
import argparse
|
|
2
|
-
import logging
|
|
3
|
-
from datamint import configs
|
|
4
|
-
from datamint.utils.logging_utils import load_cmdline_logging_config, ConsoleWrapperHandler
|
|
5
|
-
from rich.prompt import Prompt, Confirm
|
|
6
|
-
from rich.console import Console
|
|
7
|
-
|
|
8
|
-
_LOGGER = logging.getLogger(__name__)
|
|
9
|
-
_USER_LOGGER = logging.getLogger('user_logger')
|
|
10
|
-
console: Console
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
def configure_default_url():
|
|
14
|
-
"""Configure the default API URL interactively."""
|
|
15
|
-
current_url = configs.get_value(configs.APIURL_KEY, 'Not set')
|
|
16
|
-
console.print(f"Current default URL: [key]{current_url}[/key]")
|
|
17
|
-
url = Prompt.ask("Enter the default API URL (leave empty to abort)", console=console).strip()
|
|
18
|
-
if url == '':
|
|
19
|
-
return
|
|
20
|
-
|
|
21
|
-
# Basic URL validation
|
|
22
|
-
if not (url.startswith('http://') or url.startswith('https://')):
|
|
23
|
-
console.print("[warning]⚠️ URL should start with http:// or https://[/warning]")
|
|
24
|
-
return
|
|
25
|
-
|
|
26
|
-
configs.set_value(configs.APIURL_KEY, url)
|
|
27
|
-
console.print("[success]✅ Default API URL set successfully.[/success]")
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
def ask_api_key(ask_to_save: bool) -> str | None:
|
|
31
|
-
"""Ask user for API key with improved guidance."""
|
|
32
|
-
console.print("[info]💡 Get your API key from your Datamint administrator or the web app (https://app.datamint.io/team)[/info]")
|
|
33
|
-
|
|
34
|
-
api_key = Prompt.ask('API key (leave empty to abort)', console=console).strip()
|
|
35
|
-
if api_key == '':
|
|
36
|
-
return None
|
|
37
|
-
|
|
38
|
-
if ask_to_save:
|
|
39
|
-
ans = Confirm.ask("Save the API key so it automatically loads next time? (y/n): ",
|
|
40
|
-
default=True, console=console)
|
|
41
|
-
try:
|
|
42
|
-
if ans:
|
|
43
|
-
configs.set_value(configs.APIKEY_KEY, api_key)
|
|
44
|
-
console.print("[success]✅ API key saved.[/success]")
|
|
45
|
-
except Exception as e:
|
|
46
|
-
console.print("[error]❌ Error saving API key.[/error]")
|
|
47
|
-
_LOGGER.exception(e)
|
|
48
|
-
return api_key
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
def show_all_configurations():
|
|
52
|
-
"""Display all current configurations in a user-friendly format."""
|
|
53
|
-
config = configs.read_config()
|
|
54
|
-
if config is not None and len(config) > 0:
|
|
55
|
-
console.print("[title]📋 Current configurations:[/title]")
|
|
56
|
-
for key, value in config.items():
|
|
57
|
-
# Mask API key for security
|
|
58
|
-
if key == configs.APIKEY_KEY and value:
|
|
59
|
-
masked_value = f"{value[:3]}...{value[-3:]}" if len(value) > 6 else value
|
|
60
|
-
console.print(f" [key]{key}[/key]: [dim]{masked_value}[/dim]")
|
|
61
|
-
else:
|
|
62
|
-
console.print(f" [key]{key}[/key]: {value}")
|
|
63
|
-
else:
|
|
64
|
-
console.print("[dim]No configurations found.[/dim]")
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
def clear_all_configurations():
|
|
68
|
-
"""Clear all configurations with confirmation."""
|
|
69
|
-
yesno = Confirm.ask('Are you sure you want to clear all configurations?',
|
|
70
|
-
default=True, console=console)
|
|
71
|
-
if yesno:
|
|
72
|
-
configs.clear_all_configurations()
|
|
73
|
-
console.print("[success]✅ All configurations cleared.[/success]")
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
def configure_api_key():
|
|
77
|
-
"""Configure API key interactively."""
|
|
78
|
-
api_key = ask_api_key(ask_to_save=False)
|
|
79
|
-
if api_key is None:
|
|
80
|
-
return
|
|
81
|
-
configs.set_value(configs.APIKEY_KEY, api_key)
|
|
82
|
-
console.print("[success]✅ API key saved.[/success]")
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
def test_connection():
|
|
86
|
-
"""Test the API connection with current settings."""
|
|
87
|
-
try:
|
|
88
|
-
from datamint import APIHandler
|
|
89
|
-
console.print("[accent]🔄 Testing connection...[/accent]")
|
|
90
|
-
api = APIHandler()
|
|
91
|
-
# Simple test - try to get projects
|
|
92
|
-
projects = api.get_projects()
|
|
93
|
-
console.print(f"[success]✅ Connection successful! Found {len(projects)} projects.[/success]")
|
|
94
|
-
except ImportError:
|
|
95
|
-
console.print("[error]❌ Full API not available. Install with: pip install datamint[/error]")
|
|
96
|
-
except Exception as e:
|
|
97
|
-
console.print(f"[error]❌ Connection failed: {e}[/error]")
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
def interactive_mode():
|
|
101
|
-
"""Run the interactive configuration mode."""
|
|
102
|
-
console.print("[title]🔧 Datamint Configuration Tool[/title]")
|
|
103
|
-
|
|
104
|
-
try:
|
|
105
|
-
if len(configs.read_config()) == 0:
|
|
106
|
-
console.print("[warning]👋 Welcome! Let's set up your API key first.[/warning]")
|
|
107
|
-
configure_api_key()
|
|
108
|
-
|
|
109
|
-
while True:
|
|
110
|
-
console.print("\n[title]📋 Select the action you want to perform:[/title]")
|
|
111
|
-
console.print(" [accent](1)[/accent] Configure the API key")
|
|
112
|
-
console.print(" [accent](2)[/accent] Configure the default URL")
|
|
113
|
-
console.print(" [accent](3)[/accent] Show all configuration settings")
|
|
114
|
-
console.print(" [accent](4)[/accent] Clear all configuration settings")
|
|
115
|
-
console.print(" [accent](5)[/accent] Test connection")
|
|
116
|
-
console.print(" [accent](q)[/accent] Exit")
|
|
117
|
-
choice = Prompt.ask("Enter your choice", console=console).lower().strip()
|
|
118
|
-
|
|
119
|
-
if choice == '1':
|
|
120
|
-
configure_api_key()
|
|
121
|
-
elif choice == '2':
|
|
122
|
-
configure_default_url()
|
|
123
|
-
elif choice == '3':
|
|
124
|
-
show_all_configurations()
|
|
125
|
-
elif choice == '4':
|
|
126
|
-
clear_all_configurations()
|
|
127
|
-
elif choice == '5':
|
|
128
|
-
test_connection()
|
|
129
|
-
elif choice in ('q', 'exit', 'quit'):
|
|
130
|
-
break
|
|
131
|
-
else:
|
|
132
|
-
console.print("[error]❌ Invalid choice. Please enter a number between 1 and 5 or 'q' to quit.[/error]")
|
|
133
|
-
except KeyboardInterrupt:
|
|
134
|
-
console.print('')
|
|
135
|
-
|
|
136
|
-
console.print("[success]👋 Goodbye![/success]")
|
|
137
|
-
|
|
138
|
-
def main():
|
|
139
|
-
"""Main entry point for the configuration tool."""
|
|
140
|
-
global console
|
|
141
|
-
load_cmdline_logging_config()
|
|
142
|
-
console = [h for h in _USER_LOGGER.handlers if isinstance(h, ConsoleWrapperHandler)][0].console
|
|
143
|
-
parser = argparse.ArgumentParser(
|
|
144
|
-
description='🔧 Datamint API Configuration Tool',
|
|
145
|
-
epilog="""
|
|
146
|
-
Examples:
|
|
147
|
-
datamint-config # Interactive mode
|
|
148
|
-
datamint-config --api-key YOUR_KEY # Set API key
|
|
149
|
-
|
|
150
|
-
More Documentation: https://sonanceai.github.io/datamint-python-api/command_line_tools.html
|
|
151
|
-
""",
|
|
152
|
-
formatter_class=argparse.RawDescriptionHelpFormatter
|
|
153
|
-
)
|
|
154
|
-
parser.add_argument('--api-key', type=str, help='API key to set')
|
|
155
|
-
parser.add_argument('--default-url', '--url', type=str, help='Default URL to set')
|
|
156
|
-
parser.add_argument('-i', '--interactive', action='store_true',
|
|
157
|
-
help='Interactive mode (default if no other arguments provided)')
|
|
158
|
-
|
|
159
|
-
args = parser.parse_args()
|
|
160
|
-
|
|
161
|
-
if args.api_key is not None:
|
|
162
|
-
configs.set_value(configs.APIKEY_KEY, args.api_key)
|
|
163
|
-
console.print("[success]✅ API key saved.[/success]")
|
|
164
|
-
|
|
165
|
-
if args.default_url is not None:
|
|
166
|
-
# Basic URL validation
|
|
167
|
-
if not (args.default_url.startswith('http://') or args.default_url.startswith('https://')):
|
|
168
|
-
console.print("[error]❌ URL must start with http:// or https://[/error]")
|
|
169
|
-
return
|
|
170
|
-
configs.set_value(configs.APIURL_KEY, args.default_url)
|
|
171
|
-
console.print("[success]✅ Default URL saved.[/success]")
|
|
172
|
-
|
|
173
|
-
no_arguments_provided = args.api_key is None and args.default_url is None
|
|
174
|
-
|
|
175
|
-
if no_arguments_provided or args.interactive:
|
|
176
|
-
interactive_mode()
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
if __name__ == "__main__":
|
|
180
|
-
main()
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|