cloudos-cli 2.86.0__tar.gz → 2.87.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/PKG-INFO +10 -4
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/README.md +9 -3
- cloudos_cli-2.87.0/cloudos_cli/_version.py +1 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/clos.py +13 -3
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/jobs/cli.py +38 -3
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/jobs/job.py +151 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/queue/cli.py +5 -1
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/queue/queue.py +29 -2
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/utils/details.py +307 -191
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli.egg-info/PKG-INFO +10 -4
- cloudos_cli-2.86.0/cloudos_cli/_version.py +0 -1
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/LICENSE +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/__main__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/bash/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/bash/cli.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/configure/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/configure/cli.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/configure/configure.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/constants.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/cost/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/cost/cost.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/cromwell/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/cromwell/cli.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/datasets/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/datasets/cli.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/datasets/datasets.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/import_wf/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/import_wf/import_wf.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/interactive_session/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/interactive_session/cli.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/interactive_session/interactive_session.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/jobs/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/link/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/link/cli.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/link/link.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/logging/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/logging/logger.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/procurement/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/procurement/cli.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/procurement/images.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/projects/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/projects/cli.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/queue/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/related_analyses/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/related_analyses/related_analyses.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/utils/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/utils/array_job.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/utils/cli_helpers.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/utils/cloud.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/utils/errors.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/utils/last_wf.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/utils/requests.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/utils/resources.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/workflows/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/workflows/cli.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli.egg-info/SOURCES.txt +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli.egg-info/dependency_links.txt +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli.egg-info/entry_points.txt +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli.egg-info/requires.txt +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli.egg-info/top_level.txt +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/setup.cfg +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/setup.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/tests/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/tests/functions_for_pytest.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/tests/test_cli_project_create.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/tests/test_cost/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/tests/test_cost/test_job_cost.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/tests/test_error_messages.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/tests/test_interactive_session/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/tests/test_interactive_session/test_create_session.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/tests/test_interactive_session/test_list_sessions.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/tests/test_logging/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/tests/test_logging/test_logger.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/tests/test_related_analyses/__init__.py +0 -0
- {cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/tests/test_related_analyses/test_related_analyses.py +0 -0
{cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cloudos_cli
-Version: 2.86.0
+Version: 2.87.0
 Summary: Python package for interacting with CloudOS
 Home-page: https://github.com/lifebit-ai/cloudos-cli
 Author: David Piñeyro
@@ -436,7 +436,7 @@ Job queues are required for running jobs using AWS batch executor. The available
 
 #### List Queues
 
-This command allows you to view available computational queues and their configurations. You can get a summary of all available job queues in three different output formats using the `--output-format` option:
+This command allows you to view available computational queues and their configurations. By default, both regular workspace queues and system queues are displayed. You can get a summary of all available job queues in three different output formats using the `--output-format` option:
 
 - **stdout** (default): Displays a rich formatted table directly in the terminal with pagination and visual formatting
 - **csv**: Saves queue data to a CSV file with a selection of available queue information, or all information using the `--all-fields` flag
@@ -450,6 +450,12 @@ cloudos queue list --profile my_profile
 cloudos queue list --profile my_profile --output-format stdout
 ```
 
+To exclude system queues and show only workspace queues:
+
+```bash
+cloudos queue list --profile my_profile --exclude-system-queues
+```
+
 To save all available job queues in JSON format:
 
 ```bash
@@ -472,7 +478,7 @@ cloudos queue list --profile my_profile --output-format csv
 
 **Job queues for platform workflows**
 
-Platform workflows (those provided by CloudOS in your workspace as modules) run on separate and specific AWS batch queues. Therefore, CloudOS will automatically assign the valid queue and you should not specify any queue using the `--job-queue` parameter. Any attempt to use this parameter will be ignored. Examples of such platform workflows are "System Tools" and "Data Factory" workflows.
+Platform workflows (those provided by CloudOS in your workspace as modules) run on separate and specific AWS batch queues (system queues). Therefore, CloudOS will automatically assign the valid queue and you should not specify any queue using the `--job-queue` parameter. Any attempt to use this parameter will be ignored. Examples of such platform workflows are "System Tools" and "Data Factory" workflows.
 
 
 ### Workflow
@@ -886,7 +892,7 @@ You can find specific jobs within your workspace using the filtering options. Fi
 - **`--filter-job-id`**: Filter jobs by specific job ID (exact match required)
 - **`--filter-only-mine`**: Show only jobs belonging to the current user
 - **`--filter-owner`**: Show only jobs for the specified owner (exact match required, e.g., "John Doe")
-- **`--filter-queue`**: Filter jobs by queue name (only applies to batch jobs)
+- **`--filter-queue`**: Filter jobs by queue name (works with both regular and system queues; only applies to batch jobs)
 
 **Filtering Examples**
 
{cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/README.md

@@ -401,7 +401,7 @@ Job queues are required for running jobs using AWS batch executor. The available
 
 #### List Queues
 
-This command allows you to view available computational queues and their configurations. You can get a summary of all available job queues in three different output formats using the `--output-format` option:
+This command allows you to view available computational queues and their configurations. By default, both regular workspace queues and system queues are displayed. You can get a summary of all available job queues in three different output formats using the `--output-format` option:
 
 - **stdout** (default): Displays a rich formatted table directly in the terminal with pagination and visual formatting
 - **csv**: Saves queue data to a CSV file with a selection of available queue information, or all information using the `--all-fields` flag
@@ -415,6 +415,12 @@ cloudos queue list --profile my_profile
 cloudos queue list --profile my_profile --output-format stdout
 ```
 
+To exclude system queues and show only workspace queues:
+
+```bash
+cloudos queue list --profile my_profile --exclude-system-queues
+```
+
 To save all available job queues in JSON format:
 
 ```bash
@@ -437,7 +443,7 @@ cloudos queue list --profile my_profile --output-format csv
 
 **Job queues for platform workflows**
 
-Platform workflows (those provided by CloudOS in your workspace as modules) run on separate and specific AWS batch queues. Therefore, CloudOS will automatically assign the valid queue and you should not specify any queue using the `--job-queue` parameter. Any attempt to use this parameter will be ignored. Examples of such platform workflows are "System Tools" and "Data Factory" workflows.
+Platform workflows (those provided by CloudOS in your workspace as modules) run on separate and specific AWS batch queues (system queues). Therefore, CloudOS will automatically assign the valid queue and you should not specify any queue using the `--job-queue` parameter. Any attempt to use this parameter will be ignored. Examples of such platform workflows are "System Tools" and "Data Factory" workflows.
 
 
 ### Workflow
@@ -851,7 +857,7 @@ You can find specific jobs within your workspace using the filtering options. Fi
 - **`--filter-job-id`**: Filter jobs by specific job ID (exact match required)
 - **`--filter-only-mine`**: Show only jobs belonging to the current user
 - **`--filter-owner`**: Show only jobs for the specified owner (exact match required, e.g., "John Doe")
-- **`--filter-queue`**: Filter jobs by queue name (only applies to batch jobs)
+- **`--filter-queue`**: Filter jobs by queue name (works with both regular and system queues; only applies to batch jobs)
 
 **Filtering Examples**
 
cloudos_cli-2.87.0/cloudos_cli/_version.py (added)

@@ -0,0 +1 @@
+__version__ = '2.87.0'
{cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/clos.py

@@ -1239,9 +1239,7 @@ class Cloudos:
             if target_job_count != 'all' and len(all_jobs) >= target_job_count:
                 break
             else:
-
-                # When filter_queue is used, continue fetching pages until we have enough filtered results
-                if not filter_queue or len(all_jobs) >= current_page_size:
+                if not filter_queue and len(all_jobs) >= current_page_size:
                     break
 
             # Check if we reached the last page (fewer jobs than requested page size)
@@ -1257,6 +1255,18 @@ class Cloudos:
         if use_pagination_mode and target_job_count != 'all' and isinstance(target_job_count, int) and target_job_count > 0:
            all_jobs = all_jobs[:target_job_count]
 
+        # --- Adjust pagination metadata for client-side filtering ---
+        # When filter_queue is applied, we've fetched multiple API pages and filtered them.
+        # We need to return all filtered jobs so the CLI can handle pagination client-side.
+        if filter_queue and last_pagination_metadata:
+            # Mark this as client-filtered so the CLI knows to handle pagination differently
+            last_pagination_metadata = {
+                'Pagination-Count': len(all_jobs),  # Total filtered jobs collected
+                'Pagination-Page': page,  # The initial page requested
+                'Pagination-Limit': current_page_size,  # Page size
+                '_client_filtered': True  # Flag indicating client-side pagination needed
+            }
+
         return {'jobs': all_jobs, 'pagination_metadata': last_pagination_metadata}
 
     @staticmethod
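For orientation, the metadata contract introduced by this change looks roughly as follows. This is an illustrative sketch only: the dictionary keys come from the hunk above, while the job entry and the concrete values are invented.

```python
# Sketch of the dict returned by Cloudos.get_job_list() when --filter-queue
# triggers client-side filtering. Keys mirror the hunk above; values are made up.
result = {
    'jobs': [{'_id': 'abc123', 'name': 'example-job'}],  # every job matching the queue filter
    'pagination_metadata': {
        'Pagination-Count': 1,      # total filtered jobs collected
        'Pagination-Page': 1,       # the initially requested page
        'Pagination-Limit': 10,     # page size
        '_client_filtered': True    # tells the CLI to paginate these jobs itself
    }
}

# A consumer can detect the flag and slice the cached list instead of re-querying the API.
if result['pagination_metadata'].get('_client_filtered'):
    page_size = result['pagination_metadata']['Pagination-Limit']
    first_page = result['jobs'][:page_size]
```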
{cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/jobs/cli.py

@@ -2,6 +2,11 @@
 
 import rich_click as click
 import cloudos_cli.jobs.job as jb
+from cloudos_cli.jobs.job import (
+    fetch_job_page,
+    create_api_pagination_callback,
+    create_client_pagination_callback
+)
 from cloudos_cli.clos import Cloudos
 from cloudos_cli.utils.errors import BadRequestException
 from cloudos_cli.utils.resources import ssl_selector
@@ -1335,7 +1340,13 @@ def list_jobs(ctx,
             ])
         if output_format == 'stdout':
             # For stdout, always show a user-friendly message
-
+            # Create callback for interactive pagination using helper function
+            fetch_page = create_api_pagination_callback(
+                cl, workspace_id, page_size, archived, verify_ssl,
+                filter_status, filter_job_name, filter_project, filter_workflow,
+                filter_job_id, filter_only_mine, filter_owner, filter_queue, last
+            )
+            create_job_list_table([], cloudos_url, pagination_metadata, selected_columns, fetch_page_callback=fetch_page)
         else:
             if filters_used:
                 print('A total of 0 jobs collected.')
@@ -1347,8 +1358,32 @@ def list_jobs(ctx,
                     'does not exist. Please, try a smaller number for --page or collect all the jobs by not ' +
                     'using --page parameter.')
         elif output_format == 'stdout':
-            # Display as table
-
+            # Display as table with interactive pagination
+
+            # Check if results are client-side filtered (e.g., by queue)
+            is_client_filtered = pagination_metadata and pagination_metadata.get('_client_filtered', False)
+
+            if is_client_filtered:
+                # For client-filtered results, we have all jobs already
+                # Create a callback that paginates them client-side using helper function
+                fetch_page = create_client_pagination_callback(my_jobs_r, page_size)
+
+                # Show first page of filtered results
+                first_page_jobs = my_jobs_r[:page_size]
+                first_page_metadata = {
+                    'Pagination-Count': len(my_jobs_r),
+                    'Pagination-Page': 1,
+                    'Pagination-Limit': page_size
+                }
+                create_job_list_table(first_page_jobs, cloudos_url, first_page_metadata, selected_columns, fetch_page_callback=fetch_page)
+            else:
+                # For normal (non-filtered) results, use API pagination with helper function
+                fetch_page = create_api_pagination_callback(
+                    cl, workspace_id, page_size, archived, verify_ssl,
+                    filter_status, filter_job_name, filter_project, filter_workflow,
+                    filter_job_id, filter_only_mine, filter_owner, filter_queue, last
+                )
+                create_job_list_table(my_jobs_r, cloudos_url, pagination_metadata, selected_columns, fetch_page_callback=fetch_page)
         elif output_format == 'csv':
             my_jobs = cl.process_job_list(my_jobs_r, all_fields)
             cl.save_job_list_to_csv(my_jobs, outfile)
{cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/jobs/job.py

@@ -1727,3 +1727,154 @@ class Job(Cloudos):
 
         return {"branches": all_branches, "total": total or len(all_branches)}
 
+
+def fetch_job_page(cl, workspace_id, page_num, page_size, last_n_jobs, archived, verify_ssl,
+                   filter_status, filter_job_name, filter_project, filter_workflow,
+                   filter_job_id, filter_only_mine, filter_owner, filter_queue, last):
+    """Helper function to fetch a specific page of jobs.
+    Parameters
+    ----------
+    cl : Cloudos
+        CloudOS API client instance
+    workspace_id : str
+        The CloudOS workspace ID
+    page_num : int
+        Page number to fetch (1-indexed)
+    page_size : int
+        Number of jobs per page
+    last_n_jobs : int or None
+        Last N jobs parameter (should be None for pagination mode)
+    archived : bool
+        Whether to include archived jobs
+    verify_ssl : bool or str
+        SSL verification setting
+    filter_status : str or None
+        Status filter
+    filter_job_name : str or None
+        Job name filter
+    filter_project : str or None
+        Project filter
+    filter_workflow : str or None
+        Workflow filter
+    filter_job_id : str or None
+        Job ID filter
+    filter_only_mine : bool
+        Filter for user's own jobs
+    filter_owner : str or None
+        Owner filter
+    filter_queue : str or None
+        Queue filter
+    last : bool
+        Use latest workflow for duplicates
+    Returns
+    -------
+    dict
+        Dictionary with 'jobs' list and 'pagination_metadata' dict
+    """
+    result = cl.get_job_list(
+        workspace_id,
+        last_n_jobs=last_n_jobs,
+        page=page_num,
+        page_size=page_size,
+        archived=archived,
+        verify=verify_ssl,
+        filter_status=filter_status,
+        filter_job_name=filter_job_name,
+        filter_project=filter_project,
+        filter_workflow=filter_workflow,
+        filter_job_id=filter_job_id,
+        filter_only_mine=filter_only_mine,
+        filter_owner=filter_owner,
+        filter_queue=filter_queue,
+        last=last
+    )
+    return result
+
+
+def create_api_pagination_callback(cl, workspace_id, page_size, archived, verify_ssl,
+                                   filter_status, filter_job_name, filter_project, filter_workflow,
+                                   filter_job_id, filter_only_mine, filter_owner, filter_queue, last):
+    """Create a pagination callback that fetches pages from the API.
+
+    Parameters
+    ----------
+    cl : Cloudos
+        CloudOS API client instance
+    workspace_id : str
+        The CloudOS workspace ID
+    page_size : int
+        Number of jobs per page
+    archived : bool
+        Whether to include archived jobs
+    verify_ssl : bool or str
+        SSL verification setting
+    filter_status : str or None
+        Status filter
+    filter_job_name : str or None
+        Job name filter
+    filter_project : str or None
+        Project filter
+    filter_workflow : str or None
+        Workflow filter
+    filter_job_id : str or None
+        Job ID filter
+    filter_only_mine : bool
+        Filter for user's own jobs
+    filter_owner : str or None
+        Owner filter
+    filter_queue : str or None
+        Queue filter
+    last : bool
+        Use latest workflow for duplicates
+
+    Returns
+    -------
+    callable
+        Callback function that takes page_num and returns job page data
+    """
+    def api_fetch_callback(page_num):
+        return fetch_job_page(
+            cl, workspace_id, page_num, page_size, None, archived, verify_ssl,
+            filter_status, filter_job_name, filter_project, filter_workflow,
+            filter_job_id, filter_only_mine, filter_owner, filter_queue, last
+        )
+    return api_fetch_callback
+
+
+def create_client_pagination_callback(all_jobs, page_size):
+    """Create a pagination callback that paginates already-fetched jobs client-side.
+
+    Used when jobs have been pre-filtered client-side (e.g., by queue) and we
+    want to paginate through the cached results.
+
+    Parameters
+    ----------
+    all_jobs : list
+        List of all jobs to paginate through
+    page_size : int
+        Number of jobs per page
+
+    Returns
+    -------
+    callable
+        Callback function that takes page_num and returns job page data
+    """
+    def client_fetch_callback(page_num):
+        """Paginate client-side filtered results"""
+        start_idx = (page_num - 1) * page_size
+        end_idx = start_idx + page_size
+        page_jobs = all_jobs[start_idx:end_idx]
+
+        # Return with updated pagination metadata
+        return {
+            'jobs': page_jobs,
+            'pagination_metadata': {
+                'Pagination-Count': len(all_jobs),
+                'Pagination-Page': page_num,
+                'Pagination-Limit': page_size,
+                '_client_filtered': True
+            }
+        }
+    return client_fetch_callback
+
+
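The client-side helper added above can be exercised on a plain list without any API access. The snippet below is illustrative only; it assumes cloudos-cli 2.87.0 is installed and uses invented job dictionaries.

```python
# Sketch: paginate a pre-fetched job list client-side, as done for --filter-queue results.
from cloudos_cli.jobs.job import create_client_pagination_callback

dummy_jobs = [{"_id": str(i), "name": f"job-{i}"} for i in range(25)]  # made-up jobs
fetch_page = create_client_pagination_callback(dummy_jobs, page_size=10)

page_2 = fetch_page(2)
print(len(page_2["jobs"]))                                # 10 (jobs 10-19)
print(page_2["pagination_metadata"]["Pagination-Page"])   # 2
print(page_2["pagination_metadata"]["_client_filtered"])  # True
```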
{cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/queue/cli.py

@@ -47,6 +47,9 @@ def queue():
                     'just the preconfigured selected fields. Only applicable ' +
                     'when --output-format=csv'),
               is_flag=True)
+@click.option('--exclude-system-queues',
+              help='Exclude system job queues from the list.',
+              is_flag=True)
 @click.option('--disable-ssl-verification',
               help=('Disable SSL certificate verification. Please, remember that this option is ' +
                     'not generally recommended for security reasons.'),
@@ -63,6 +66,7 @@ def list_queues(ctx,
                 output_basename,
                 output_format,
                 all_fields,
+                exclude_system_queues,
                 disable_ssl_verification,
                 ssl_cert,
                 profile):
@@ -72,7 +76,7 @@ def list_queues(ctx,
     verify_ssl = ssl_selector(disable_ssl_verification, ssl_cert)
     print('Executing list...')
     j_queue = Queue(cloudos_url, apikey, None, workspace_id, verify=verify_ssl)
-    my_queues = j_queue.get_job_queues()
+    my_queues = j_queue.get_job_queues(exclude_system_queues=exclude_system_queues)
     if len(my_queues) == 0:
         raise ValueError('No AWS batch queues found. Please, make sure that your CloudOS supports AWS batch queues')
     if output_format == 'stdout':
{cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/queue/queue.py

@@ -33,9 +33,14 @@ class Queue(Cloudos):
     workspace_id: str
     verify: Union[bool, str] = True
 
-    def get_job_queues(self):
+    def get_job_queues(self, exclude_system_queues=False):
         """Get all the job queues from a CloudOS workspace.
 
+        Parameters
+        ----------
+        exclude_system_queues : bool, default=False
+            Whether to exclude system job queues from the result.
+
         Returns
         -------
         r : list
@@ -47,6 +52,28 @@ class Queue(Cloudos):
                          headers=headers, verify=self.verify)
         if r.status_code >= 400:
             raise BadRequestException(r)
+        queues = json.loads(r.content)
+        # By default, include system queues unless excluded
+        if not exclude_system_queues:
+            system_queues = self.get_system_job_queues()
+            queues.extend(system_queues)
+
+        return queues
+
+    def get_system_job_queues(self):
+        """Get all the system job queues from CloudOS.
+
+        Returns
+        -------
+        r : list
+            A list of dicts, each corresponding to a system job queue.
+        """
+        headers = {"apikey": self.apikey}
+        r = requests.get("{}/api/v1/teams/aws/v2/system-job-queues?teamId={}".format(self.cloudos_url,
+                                                                                     self.workspace_id),
+                         headers=headers, verify=self.verify)
+        if r.status_code >= 400:
+            raise BadRequestException(r)
         return json.loads(r.content)
 
     @staticmethod
@@ -118,7 +145,7 @@ class Queue(Cloudos):
         if len(available_queues) == 0:
             raise Exception(f'There are no available job queues for {workflow_type} ' +
                             'workflows. Consider creating one using CloudOS UI.')
-        default_queue = [q for q in available_queues if q
+        default_queue = [q for q in available_queues if q.get('isDefault', False)]
         if len(default_queue) > 0:
             default_queue_id = default_queue[0]['id']
             default_queue_name = default_queue[0]['label']
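A minimal sketch of how the new parameter can be used from Python is shown below. The constructor call mirrors the one in cloudos_cli/queue/cli.py above; the URL, API key and workspace ID are placeholders.

```python
# Sketch: list queues with and without system queues (placeholder credentials).
from cloudos_cli.queue.queue import Queue

j_queue = Queue("https://cloudos.lifebit.ai", "MY_API_KEY", None, "MY_WORKSPACE_ID", verify=True)

all_queues = j_queue.get_job_queues()                                # workspace + system queues (new default)
workspace_only = j_queue.get_job_queues(exclude_system_queues=True)  # equivalent of --exclude-system-queues
```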
{cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli/utils/details.py

@@ -337,9 +337,217 @@ def create_job_details(j_details_h, job_id, output_format, output_basename, para
     print(f"\tJob details have been saved to '{output_basename}.csv'")
 
 
-def
+def _build_job_row_values(job, cloudos_url, terminal_width, columns_to_show):
+    """Helper function to build row values for a single job.
+
+    Parameters
+    ----------
+    job : dict
+        Job dictionary from CloudOS API
+    cloudos_url : str
+        CloudOS service URL for generating job links
+    terminal_width : int
+        Current terminal width for responsive formatting
+    columns_to_show : list
+        List of column keys to include
+
+    Returns
+    -------
+    list
+        Row values in the order of columns_to_show
+    """
+    # Status with colored and bold ANSI symbols
+    status_raw = str(job.get("status", "N/A"))
+    status_symbol_map = {
+        "completed": "[bold green]✓[/bold green]",
+        "running": "[bold bright_black]◐[/bold bright_black]",
+        "failed": "[bold red]✗[/bold red]",
+        "aborted": "[bold orange3]■[/bold orange3]",
+        "initialising": "[bold bright_black]○[/bold bright_black]",
+        "N/A": "[bold bright_black]?[/bold bright_black]"
+    }
+    status = status_symbol_map.get(status_raw.lower(), status_raw)
+
+    # Name
+    name = str(job.get("name", "N/A"))
+
+    # Project
+    project = str(job.get("project", {}).get("name", "N/A"))
+
+    # Owner (compact format for small terminals)
+    user_info = job.get("user", {})
+    name_part = user_info.get('name', '')
+    surname_part = user_info.get('surname', '')
+    if terminal_width < 90:
+        if name_part and surname_part:
+            owner = f"{name_part[0]}.{surname_part[0]}."
+        elif name_part or surname_part:
+            owner = (name_part or surname_part)[:8]
+        else:
+            owner = "N/A"
+    else:
+        if name_part and surname_part:
+            owner = f"{name_part}\n{surname_part}"
+        elif name_part or surname_part:
+            owner = name_part or surname_part
+        else:
+            owner = "N/A"
+
+    # Pipeline
+    pipeline = str(job.get("workflow", {}).get("name", "N/A")).split('\n')[0].strip()
+    if len(pipeline) > 25:
+        pipeline = pipeline[:22] + "..."
+
+    # ID with hyperlink
+    job_id = str(job.get("_id", "N/A"))
+    job_url = f"{cloudos_url}/app/advanced-analytics/analyses/{job_id}"
+    job_id_with_link = f"[link={job_url}]{job_id}[/link]"
+
+    # Submit time (compact format for small terminals)
+    created_at = job.get("createdAt")
+    if created_at:
+        try:
+            dt = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
+            submit_time = dt.strftime('%m-%d\n%H:%M') if terminal_width < 90 else dt.strftime('%Y-%m-%d\n%H:%M:%S')
+        except (ValueError, TypeError):
+            submit_time = "N/A"
+    else:
+        submit_time = "N/A"
+
+    # End time (compact format for small terminals)
+    end_time_raw = job.get("endTime")
+    if end_time_raw:
+        try:
+            dt = datetime.fromisoformat(end_time_raw.replace('Z', '+00:00'))
+            end_time = dt.strftime('%m-%d\n%H:%M') if terminal_width < 90 else dt.strftime('%Y-%m-%d\n%H:%M:%S')
+        except (ValueError, TypeError):
+            end_time = "N/A"
+    else:
+        end_time = "N/A"
+
+    # Run time (calculate from startTime and endTime)
+    start_time_raw = job.get("startTime")
+    if start_time_raw and end_time_raw:
+        try:
+            start_dt = datetime.fromisoformat(start_time_raw.replace('Z', '+00:00'))
+            end_dt = datetime.fromisoformat(end_time_raw.replace('Z', '+00:00'))
+            duration = end_dt - start_dt
+            total_seconds = int(duration.total_seconds())
+            hours = total_seconds // 3600
+            minutes = (total_seconds % 3600) // 60
+            seconds = total_seconds % 60
+            if hours > 0:
+                run_time = f"{hours}h {minutes}m {seconds}s"
+            elif minutes > 0:
+                run_time = f"{minutes}m {seconds}s"
+            else:
+                run_time = f"{seconds}s"
+        except (ValueError, TypeError):
+            run_time = "N/A"
+    else:
+        run_time = "N/A"
+
+    # Commit
+    revision = job.get("revision", {})
+    if job.get("jobType") == "dockerAWS":
+        commit = str(revision.get("digest", "N/A"))
+    else:
+        commit = str(revision.get("commit", "N/A"))
+    if commit != "N/A" and len(commit) > 7:
+        commit = commit[:7]
+
+    # Cost
+    cost_raw = job.get("computeCostSpent") or job.get("realInstancesExecutionCost")
+    if cost_raw is not None:
+        try:
+            cost = f"${float(cost_raw) / 100:.4f}"
+        except (ValueError, TypeError):
+            cost = "N/A"
+    else:
+        cost = "N/A"
+
+    # Resources (instance type only)
+    master_instance = job.get("masterInstance", {})
+    used_instance = master_instance.get("usedInstance", {})
+    instance_type = used_instance.get("type", "N/A")
+    resources = instance_type if instance_type else "N/A"
+
+    # Storage type
+    storage_mode = job.get("storageMode", "N/A")
+    if storage_mode == "regular":
+        storage_type = "Regular"
+    elif storage_mode == "lustre":
+        storage_type = "Lustre"
+    else:
+        storage_type = str(storage_mode).capitalize() if storage_mode != "N/A" else "N/A"
+
+    # Map column keys to their values
+    column_values = {
+        'status': status,
+        'name': name,
+        'project': project,
+        'owner': owner,
+        'pipeline': pipeline,
+        'id': job_id_with_link,
+        'submit_time': submit_time,
+        'end_time': end_time,
+        'run_time': run_time,
+        'commit': commit,
+        'cost': cost,
+        'resources': resources,
+        'storage_type': storage_type
+    }
+
+    # Return row values in the order of columns_to_show
+    return [column_values[col] for col in columns_to_show]
+
+
+def _build_job_table(jobs, cloudos_url, terminal_width, columns_to_show, all_columns):
+    """Helper function to build a complete job table.
+
+    Parameters
+    ----------
+    jobs : list
+        List of job dictionaries from CloudOS API
+    cloudos_url : str
+        CloudOS service URL for generating job links
+    terminal_width : int
+        Current terminal width for responsive formatting
+    columns_to_show : list
+        List of column keys to include
+    all_columns : dict
+        Dictionary of all column configurations
+
+    Returns
+    -------
+    Table
+        Rich Table object populated with job rows
     """
-
+    table = Table(title="Job List")
+
+    # Add columns to table
+    for col_key in columns_to_show:
+        col_config = all_columns[col_key]
+        table.add_column(
+            col_config["header"],
+            style=col_config.get("style"),
+            no_wrap=col_config.get("no_wrap", False),
+            overflow=col_config.get("overflow"),
+            min_width=col_config.get("min_width"),
+            max_width=col_config.get("max_width")
+        )
+
+    # Add rows for each job
+    for job in jobs:
+        row_values = _build_job_row_values(job, cloudos_url, terminal_width, columns_to_show)
+        table.add_row(*row_values)
+
+    return table
+
+
+def create_job_list_table(jobs, cloudos_url, pagination_metadata=None, selected_columns=None, fetch_page_callback=None):
+    """
+    Creates a formatted job list table for stdout output with responsive design and interactive pagination.
 
     The table automatically adapts to terminal width by showing different column sets:
     - Very narrow (<60 chars): Essential columns only (status, name, pipeline, id)
@@ -373,6 +581,10 @@ def create_job_list_table(jobs, cloudos_url, pagination_metadata=None, selected_
         - List: List of column names
         Valid columns: 'status', 'name', 'project', 'owner', 'pipeline', 'id',
         'submit_time', 'end_time', 'run_time', 'commit', 'cost', 'resources', 'storage_type'
+    fetch_page_callback : callable, optional
+        Callback function to fetch a specific page of results for interactive pagination.
+        Should accept page number (1-indexed) and return dict with 'jobs' and 'pagination_metadata' keys.
+        If provided, enables interactive navigation (n=next, p=previous, q=quit).
 
     Returns
     -------
@@ -446,207 +658,109 @@ def create_job_list_table(jobs, cloudos_url, pagination_metadata=None, selected_
 
     if not jobs:
         console.print("\n[yellow]No jobs found matching the criteria.[/yellow]")
-
+        return
+
+    # Create table using helper function
+    table = _build_job_table(jobs, cloudos_url, terminal_width, columns_to_show, all_columns)
+
+    # If no fetch_page_callback, display static table
+    if not fetch_page_callback or not pagination_metadata:
+        console.print(table)
+
+        # Display pagination info at the bottom
         if pagination_metadata:
             total_jobs = pagination_metadata.get('Pagination-Count', 0)
             current_page = pagination_metadata.get('Pagination-Page', 1)
             page_size = pagination_metadata.get('Pagination-Limit', 10)
             total_pages = (total_jobs + page_size - 1) // page_size if total_jobs > 0 else 1
 
-            console.print(f"\n[cyan]
-            console.print(f"[cyan]Page:[/cyan] {current_page} of {total_pages}")
-            console.print(f"[cyan]Jobs on this page:[/cyan] {len(jobs)}")
+            console.print(f"\n[cyan]Showing {len(jobs)} of {total_jobs} total jobs | Page {current_page} of {total_pages}[/cyan]")
         return
 
-    #
-    […]
-        # Owner (compact format for small terminals)
-        user_info = job.get("user", {})
-        name_part = user_info.get('name', '')
-        surname_part = user_info.get('surname', '')
-        if terminal_width < 90:
-            # Compact format: just first name or first letter of each
-            if name_part and surname_part:
-                owner = f"{name_part[0]}.{surname_part[0]}."
-            elif name_part or surname_part:
-                owner = (name_part or surname_part)[:8]
-            else:
-                owner = "N/A"
-        else:
-            # Full format for wider terminals
-            if name_part and surname_part:
-                owner = f"{name_part}\n{surname_part}"
-            elif name_part or surname_part:
-                owner = name_part or surname_part
-            else:
-                owner = "N/A"
-
-        # Pipeline
-        pipeline = str(job.get("workflow", {}).get("name", "N/A"))
-        # Only show the first line if pipeline name contains newlines
-        pipeline = pipeline.split('\n')[0].strip()
-        # Truncate to 25 chars with ellipsis if longer
-        if len(pipeline) > 25:
-            pipeline = pipeline[:22] + "..."
-
-        # ID with hyperlink
-        job_id = str(job.get("_id", "N/A"))
-        job_url = f"{cloudos_url}/app/advanced-analytics/analyses/{job_id}"
-        job_id_with_link = f"[link={job_url}]{job_id}[/link]"
-
-        # Submit time (compact format for small terminals)
-        created_at = job.get("createdAt")
-        if created_at:
-            try:
-                dt = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
-                if terminal_width < 90:
-                    # Compact format: MM-DD HH:MM
-                    submit_time = dt.strftime('%m-%d\n%H:%M')
-                else:
-                    # Full format
-                    submit_time = dt.strftime('%Y-%m-%d\n%H:%M:%S')
-            except (ValueError, TypeError):
-                submit_time = "N/A"
-        else:
-            submit_time = "N/A"
-
-        # End time (compact format for small terminals)
-        end_time_raw = job.get("endTime")
-        if end_time_raw:
+    # Interactive pagination mode
+    current_page = pagination_metadata.get('Pagination-Page', 1) or 1  # Ensure never None
+    total_jobs = pagination_metadata.get('Pagination-Count', 0)
+    page_size_value = pagination_metadata.get('Pagination-Limit', 10)
+    total_pages = (total_jobs + page_size_value - 1) // page_size_value if total_jobs > 0 else 1
+
+    show_error = None
+
+    while True:
+        # Clear console and display table
+        console.clear()
+        console.print(table)
+
+        # Display pagination info
+        console.print(f"\n[cyan]Total jobs:[/cyan] {total_jobs}")
+        if total_pages > 1:
+            console.print(f"[cyan]Page:[/cyan] {current_page} of {total_pages}")
+        console.print(f"[cyan]Jobs on this page:[/cyan] {len(jobs)}")
+
+        # Show error message if any
+        if show_error:
+            console.print(show_error)
+            show_error = None
+
+        # Show pagination controls only if there are multiple pages
+        if total_pages > 1:
+            # Check if we're in an interactive environment
+            if not sys.stdin.isatty():
+                console.print("\n[yellow]Note: Pagination not available in non-interactive mode. Showing page 1 of {0}.[/yellow]".format(total_pages))
+                console.print("[yellow]Run in an interactive terminal to navigate through all pages.[/yellow]")
+                break
+
+            console.print(f"\n[bold cyan]n[/] = next, [bold cyan]p[/] = prev, [bold cyan]q[/] = quit")
+
+            # Get user input for navigation
             try:
-    […]
+                choice = input(">>> ").strip().lower()
+            except (EOFError, KeyboardInterrupt):
+                console.print("\n[yellow]Pagination interrupted.[/yellow]")
+                break
+
+            if choice in ("q", "quit"):
+                break
+            elif choice in ("n", "next"):
+                if current_page < total_pages:
+                    try:
+                        result = fetch_page_callback(current_page + 1)
+                        jobs = result.get('jobs', [])
+                        pagination_metadata = result.get('pagination_metadata', {})
+                        current_page = pagination_metadata.get('Pagination-Page', current_page + 1)
+                        total_pages = pagination_metadata.get('totalPages',
+                                                              (pagination_metadata.get('Pagination-Count', 0) + page_size_value - 1) // page_size_value
+                                                              if pagination_metadata.get('Pagination-Count', 0) > 0 else 1)
+
+                        # Rebuild table with new jobs using helper function
+                        table = _build_job_table(jobs, cloudos_url, terminal_width, columns_to_show, all_columns)
+
+                    except Exception as e:
+                        show_error = f"[red]Error fetching page: {str(e)}[/red]"
                 else:
-    […]
-                seconds = total_seconds % 60
-                if hours > 0:
-                    run_time = f"{hours}h {minutes}m {seconds}s"
-                elif minutes > 0:
-                    run_time = f"{minutes}m {seconds}s"
+                    show_error = "[yellow]Already on last page[/yellow]"
+            elif choice in ("p", "prev", "previous"):
+                if current_page > 1:
+                    try:
+                        result = fetch_page_callback(current_page - 1)
+                        jobs = result.get('jobs', [])
+                        pagination_metadata = result.get('pagination_metadata', {})
+                        current_page = pagination_metadata.get('Pagination-Page', current_page - 1)
+                        total_pages = pagination_metadata.get('totalPages',
+                                                              (pagination_metadata.get('Pagination-Count', 0) + page_size_value - 1) // page_size_value
+                                                              if pagination_metadata.get('Pagination-Count', 0) > 0 else 1)
+
+                        # Rebuild table with new jobs using helper function
+                        table = _build_job_table(jobs, cloudos_url, terminal_width, columns_to_show, all_columns)
+
+                    except Exception as e:
+                        show_error = f"[red]Error fetching page: {str(e)}[/red]"
                 else:
-    […]
-            else:
-                run_time = "N/A"
-
-        # Commit
-        revision = job.get("revision", {})
-        if job.get("jobType") == "dockerAWS":
-            commit = str(revision.get("digest", "N/A"))
-        else:
-            commit = str(revision.get("commit", "N/A"))
-        # Truncate commit to 7 characters if it's longer
-        if commit != "N/A" and len(commit) > 7:
-            commit = commit[:7]
-
-        # Cost
-        cost_raw = job.get("computeCostSpent") or job.get("realInstancesExecutionCost")
-        if cost_raw is not None:
-            try:
-                cost = f"${float(cost_raw) / 100:.4f}"
-            except (ValueError, TypeError):
-                cost = "N/A"
-        else:
-            cost = "N/A"
-
-        # Resources (instance type only)
-        master_instance = job.get("masterInstance", {})
-        used_instance = master_instance.get("usedInstance", {})
-        instance_type = used_instance.get("type", "N/A")
-        resources = instance_type if instance_type else "N/A"
-
-        # Storage type
-        storage_mode = job.get("storageMode", "N/A")
-        if storage_mode == "regular":
-            storage_type = "Regular"
-        elif storage_mode == "lustre":
-            storage_type = "Lustre"
+                    show_error = "[yellow]Already on first page[/yellow]"
+            else:
+                show_error = "[yellow]Invalid choice. Use 'n' (next), 'p' (previous), or 'q' (quit)[/yellow]"
         else:
-    […]
-        # Map column keys to their values
-        column_values = {
-            'status': status,
-            'name': name,
-            'project': project,
-            'owner': owner,
-            'pipeline': pipeline,
-            'id': job_id_with_link,
-            'submit_time': submit_time,
-            'end_time': end_time,
-            'run_time': run_time,
-            'commit': commit,
-            'cost': cost,
-            'resources': resources,
-            'storage_type': storage_type
-        }
-
-        # Add row to table with only selected columns
-        row_values = [column_values[col] for col in columns_to_show]
-        table.add_row(*row_values)
-
-    console.print(table)
-
-    # Display pagination info at the bottom
-    if pagination_metadata:
-        total_jobs = pagination_metadata.get('Pagination-Count', 0)
-        current_page = pagination_metadata.get('Pagination-Page', 1)
-        page_size = pagination_metadata.get('Pagination-Limit', 10)
-        total_pages = (total_jobs + page_size - 1) // page_size if total_jobs > 0 else 1
-
-        console.print(f"\n[cyan]Showing {len(jobs)} of {total_jobs} total jobs | Page {current_page} of {total_pages}[/cyan]")
+            # Only one page, exit after displaying
+            break
 
 
 def create_workflow_list_table(workflows, cloudos_url="https://cloudos.lifebit.ai", page_size=10):
@@ -845,6 +959,8 @@ def create_queue_list_table(queues, cloudos_url="https://cloudos.lifebit.ai"):
             resource_type = "N/A"
         elif resource_type == "teamBatchJobQueue":
             resource_type = "Batch Queues"
+        elif resource_type == "systemBatchJobQueue":
+            resource_type = "System Queue"
 
         # Status with checkmark/X icons
         status_raw = str(queue.get("status", "N/A"))
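The fetch_page_callback contract documented in the new docstring is small; the callback below is a hypothetical example that satisfies it with invented data. Only the required keys come from the change above.

```python
# Sketch: a minimal fetch_page_callback for create_job_list_table.
# It takes a 1-indexed page number and returns 'jobs' plus 'pagination_metadata'.
def my_fetch_page(page_num):
    all_jobs = [{"_id": str(i), "name": f"job-{i}"} for i in range(42)]  # stand-in data
    page_size = 10
    start = (page_num - 1) * page_size
    return {
        "jobs": all_jobs[start:start + page_size],
        "pagination_metadata": {
            "Pagination-Count": len(all_jobs),
            "Pagination-Page": page_num,
            "Pagination-Limit": page_size,
        },
    }

# Passing it enables the n/p/q navigation loop, e.g.:
# create_job_list_table(jobs, cloudos_url, pagination_metadata, selected_columns,
#                       fetch_page_callback=my_fetch_page)
```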
{cloudos_cli-2.86.0 → cloudos_cli-2.87.0}/cloudos_cli.egg-info/PKG-INFO

Identical changes to PKG-INFO above: the Version field is bumped from 2.86.0 to 2.87.0, and the embedded README text receives the same queue-listing, system-queue and `--filter-queue` updates (hunks at lines 1-6, 436-442, 450-461, 472-484 and 886-898).
cloudos_cli-2.86.0/cloudos_cli/_version.py (removed)

@@ -1 +0,0 @@
-__version__ = '2.86.0'
All remaining files listed above with "+0 -0" are unchanged between the two releases; only their containing directory was renamed from cloudos_cli-2.86.0 to cloudos_cli-2.87.0.