fractal-server 1.3.14a0__tar.gz → 1.4.0a1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/PKG-INFO +1 -1
  2. fractal_server-1.4.0a1/fractal_server/__init__.py +1 -0
  3. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/api/__init__.py +1 -0
  4. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/api/v1/dataset.py +1 -20
  5. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/api/v1/job.py +43 -3
  6. fractal_server-1.4.0a1/fractal_server/app/api/v1/monitoring.py +150 -0
  7. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/api/v1/project.py +5 -2
  8. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/api/v1/workflow.py +0 -14
  9. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/models/dataset.py +15 -0
  10. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/models/job.py +22 -51
  11. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/models/project.py +2 -3
  12. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/models/workflow.py +5 -1
  13. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/__init__.py +17 -1
  14. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/schemas/__init__.py +1 -0
  15. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/schemas/applyworkflow.py +30 -1
  16. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/schemas/project.py +4 -0
  17. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/main.py +5 -0
  18. fractal_server-1.4.0a1/fractal_server/migrations/versions/84bf0fffde30_add_dumps_to_applyworkflow.py +86 -0
  19. fractal_server-1.4.0a1/fractal_server/migrations/versions/e75cac726012_make_applyworkflow_start_timestamp_not_.py +35 -0
  20. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/pyproject.toml +4 -4
  21. fractal_server-1.3.14a0/fractal_server/__init__.py +0 -1
  22. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/LICENSE +0 -0
  23. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/README.md +0 -0
  24. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/__main__.py +0 -0
  25. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/alembic.ini +0 -0
  26. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/__init__.py +0 -0
  27. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/api/v1/__init__.py +0 -0
  28. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/api/v1/_aux_functions.py +0 -0
  29. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/api/v1/task.py +0 -0
  30. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/api/v1/task_collection.py +0 -0
  31. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/api/v1/workflowtask.py +0 -0
  32. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/db/__init__.py +0 -0
  33. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/models/__init__.py +0 -0
  34. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/models/linkuserproject.py +0 -0
  35. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/models/security.py +0 -0
  36. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/models/state.py +0 -0
  37. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/models/task.py +0 -0
  38. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/.gitignore +0 -0
  39. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/_common.py +0 -0
  40. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/_local/__init__.py +0 -0
  41. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/_local/_local_config.py +0 -0
  42. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/_local/_submit_setup.py +0 -0
  43. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/_local/executor.py +0 -0
  44. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/_slurm/.gitignore +0 -0
  45. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/_slurm/__init__.py +0 -0
  46. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/_slurm/_batching.py +0 -0
  47. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/_slurm/_executor_wait_thread.py +0 -0
  48. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/_slurm/_slurm_config.py +0 -0
  49. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/_slurm/_submit_setup.py +0 -0
  50. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/_slurm/_subprocess_run_as_user.py +0 -0
  51. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/_slurm/executor.py +0 -0
  52. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/_slurm/remote.py +0 -0
  53. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/common.py +0 -0
  54. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/runner/handle_failed_job.py +0 -0
  55. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/schemas/_validators.py +0 -0
  56. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/schemas/dataset.py +0 -0
  57. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/schemas/json_schemas/manifest.json +0 -0
  58. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/schemas/manifest.py +0 -0
  59. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/schemas/state.py +0 -0
  60. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/schemas/task.py +0 -0
  61. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/schemas/task_collection.py +0 -0
  62. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/schemas/user.py +0 -0
  63. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/schemas/workflow.py +0 -0
  64. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/app/security/__init__.py +0 -0
  65. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/config.py +0 -0
  66. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/logger.py +0 -0
  67. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/migrations/README +0 -0
  68. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/migrations/env.py +0 -0
  69. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/migrations/script.py.mako +0 -0
  70. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/migrations/versions/4c308bcaea2b_add_task_args_schema_and_task_args_.py +0 -0
  71. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/migrations/versions/50a13d6138fd_initial_schema.py +0 -0
  72. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/migrations/versions/70e77f1c38b0_add_applyworkflow_first_task_index_and_.py +0 -0
  73. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/migrations/versions/8f79bd162e35_add_docs_info_and_docs_link_to_task_.py +0 -0
  74. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/migrations/versions/99ea79d9e5d2_add_dataset_history.py +0 -0
  75. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/migrations/versions/a7f4d6137b53_add_workflow_dump_to_applyworkflow.py +0 -0
  76. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/migrations/versions/f384e1c0cf5d_drop_task_default_args_columns.py +0 -0
  77. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/py.typed +0 -0
  78. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/syringe.py +0 -0
  79. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/tasks/__init__.py +0 -0
  80. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/tasks/collection.py +0 -0
  81. {fractal_server-1.3.14a0 → fractal_server-1.4.0a1}/fractal_server/utils.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: fractal-server
- Version: 1.3.14a0
+ Version: 1.4.0a1
  Summary: Server component of the Fractal analytics platform
  Home-page: https://github.com/fractal-analytics-platform/fractal-server
  License: BSD-3-Clause
@@ -0,0 +1 @@
+ __VERSION__ = "1.4.0a1"
@@ -7,6 +7,7 @@ from ...config import get_settings
  from ...syringe import Inject
  from .v1.dataset import router as dataset_router
  from .v1.job import router as job_router
+ from .v1.monitoring import router as router_monitoring # noqa
  from .v1.project import router as project_router
  from .v1.task import router as task_router
  from .v1.task_collection import router as taskcollection_router
@@ -7,20 +7,19 @@ from fastapi import Depends
  from fastapi import HTTPException
  from fastapi import Response
  from fastapi import status
- from sqlmodel import or_
  from sqlmodel import select

  from ...db import AsyncSession
  from ...db import get_db
  from ...models import ApplyWorkflow
  from ...models import Dataset
- from ...models import JobStatusType
  from ...models import Resource
  from ...runner._common import HISTORY_FILENAME
  from ...schemas import DatasetCreate
  from ...schemas import DatasetRead
  from ...schemas import DatasetStatusRead
  from ...schemas import DatasetUpdate
+ from ...schemas import JobStatusType
  from ...schemas import ResourceCreate
  from ...schemas import ResourceRead
  from ...schemas import ResourceUpdate
@@ -145,24 +144,6 @@ async def delete_dataset(
  )
  dataset = output["dataset"]

- # Check that no ApplyWorkflow is in relationship with the current Dataset
- stm = select(ApplyWorkflow).filter(
- or_(
- ApplyWorkflow.input_dataset_id == dataset_id,
- ApplyWorkflow.output_dataset_id == dataset_id,
- )
- )
- res = await db.execute(stm)
- job = res.scalars().first()
- if job:
- raise HTTPException(
- status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
- detail=(
- f"Cannot remove dataset {dataset_id}: "
- f"it's still linked to job {job.id}."
- ),
- )
-
  await db.delete(dataset)
  await db.commit()
  await db.close()
@@ -7,6 +7,7 @@ from zipfile import ZipFile
  from fastapi import APIRouter
  from fastapi import Depends
  from fastapi import HTTPException
+ from fastapi import Response
  from fastapi import status
  from fastapi.responses import StreamingResponse
  from sqlmodel import select
@@ -22,11 +23,50 @@ from ...security import current_active_user
  from ...security import User
  from ._aux_functions import _get_job_check_owner
  from ._aux_functions import _get_project_check_owner
+ from ._aux_functions import _get_workflow_check_owner


  router = APIRouter()


+ @router.get("/project/job/", response_model=list[ApplyWorkflowRead])
+ async def get_user_jobs(
+ user: User = Depends(current_active_user),
+ ) -> list[ApplyWorkflowRead]:
+ """
+ Returns all the jobs of the current user
+ """
+
+ job_list = [
+ job for project in user.project_list for job in project.job_list
+ ]
+
+ return job_list
+
+
+ @router.get(
+ "/project/{project_id}/workflow/{workflow_id}/job/",
+ response_model=list[ApplyWorkflowRead],
+ )
+ async def get_workflow_jobs(
+ project_id: int,
+ workflow_id: int,
+ user: User = Depends(current_active_user),
+ db: AsyncSession = Depends(get_db),
+ ) -> Optional[list[ApplyWorkflowRead]]:
+ """
+ Returns all the jobs related to a specific workflow
+ """
+
+ workflow = await _get_workflow_check_owner(
+ project_id=project_id, workflow_id=workflow_id, user_id=user.id, db=db
+ )
+ job_list = workflow.job_list
+ await db.close()
+
+ return job_list
+
+
  @router.get(
  "/project/{project_id}/job/{job_id}",
  response_model=ApplyWorkflowRead,
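A minimal client-side sketch of calling the two new job-listing endpoints added above; the base URL, bearer token, and IDs are assumptions, and the /api/v1 prefix is the one mounted in main.py further down in this diff.

    # Hypothetical usage (assumes a running server and a valid TOKEN)
    import httpx

    headers = {"Authorization": "Bearer TOKEN"}
    base = "http://localhost:8000/api/v1"

    # All jobs across the current user's projects
    user_jobs = httpx.get(f"{base}/project/job/", headers=headers).json()

    # All jobs of workflow 3 inside project 1
    workflow_jobs = httpx.get(
        f"{base}/project/1/workflow/3/job/", headers=headers
    ).json()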
@@ -119,14 +159,14 @@ async def get_job_list(

  @router.get(
  "/project/{project_id}/job/{job_id}/stop/",
- status_code=200,
+ status_code=204,
  )
  async def stop_job(
  project_id: int,
  job_id: int,
  user: User = Depends(current_active_user),
  db: AsyncSession = Depends(get_db),
- ) -> Optional[ApplyWorkflow]:
+ ) -> Response:
  """
  Stop execution of a workflow job (only available for slurm backend)
  """
@@ -161,4 +201,4 @@ async def stop_job(
  with shutdown_file.open("w") as f:
  f.write(f"Trigger executor shutdown for {job.id=}, {project_id=}.")

- return job
+ return Response(status_code=status.HTTP_204_NO_CONTENT)
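Since the stop endpoint now returns 204 No Content instead of the job object, callers should rely on the status code rather than on the response body. A minimal sketch, with URL, IDs, and token assumed:

    import httpx

    r = httpx.get(
        "http://localhost:8000/api/v1/project/1/job/7/stop/",
        headers={"Authorization": "Bearer TOKEN"},
    )
    assert r.status_code == 204  # empty body; previously 200 with the job as payload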
@@ -0,0 +1,150 @@
+ from datetime import datetime
+ from typing import Optional
+
+ from fastapi import APIRouter
+ from fastapi import Depends
+ from sqlalchemy import func
+ from sqlmodel import select
+
+ from ...db import AsyncSession
+ from ...db import get_db
+ from ...models import ApplyWorkflow
+ from ...models import Dataset
+ from ...models import JobStatusType
+ from ...models import Project
+ from ...models import Workflow
+ from ...schemas import ApplyWorkflowRead
+ from ...schemas import DatasetRead
+ from ...schemas import ProjectRead
+ from ...schemas import WorkflowRead
+ from ...security import current_active_superuser
+ from ...security import User
+
+
+ router = APIRouter()
+
+
+ @router.get("/project/", response_model=list[ProjectRead])
+ async def monitor_project(
+ id: Optional[int] = None,
+ user_id: Optional[int] = None,
+ user: User = Depends(current_active_superuser),
+ db: AsyncSession = Depends(get_db),
+ ) -> list[ProjectRead]:
+
+ stm = select(Project)
+
+ if id is not None:
+ stm = stm.where(Project.id == id)
+
+ if user_id is not None:
+ stm = stm.where(Project.user_list.any(User.id == user_id))
+
+ res = await db.execute(stm)
+ project_list = res.scalars().all()
+ await db.close()
+
+ return project_list
+
+
+ @router.get("/workflow/", response_model=list[WorkflowRead])
+ async def monitor_workflow(
+ id: Optional[int] = None,
+ project_id: Optional[int] = None,
+ name_contains: Optional[str] = None,
+ user: User = Depends(current_active_superuser),
+ db: AsyncSession = Depends(get_db),
+ ) -> list[WorkflowRead]:
+ stm = select(Workflow)
+
+ if id is not None:
+ stm = stm.where(Workflow.id == id)
+ if project_id is not None:
+ stm = stm.where(Workflow.project_id == project_id)
+ if name_contains is not None:
+ # SQLAlchemy2: use icontains
+ stm = stm.where(
+ func.lower(Workflow.name).contains(name_contains.lower())
+ )
+
+ res = await db.execute(stm)
+ workflow_list = res.scalars().all()
+ await db.close()
+
+ return workflow_list
+
+
+ @router.get("/dataset/", response_model=list[DatasetRead])
+ async def monitor_dataset(
+ id: Optional[int] = None,
+ project_id: Optional[int] = None,
+ name_contains: Optional[str] = None,
+ type: Optional[str] = None,
+ user: User = Depends(current_active_superuser),
+ db: AsyncSession = Depends(get_db),
+ ) -> list[DatasetRead]:
+ stm = select(Dataset)
+
+ if id is not None:
+ stm = stm.where(Dataset.id == id)
+ if project_id is not None:
+ stm = stm.where(Dataset.project_id == project_id)
+ if name_contains is not None:
+ # SQLAlchemy2: use icontains
+ stm = stm.where(
+ func.lower(Dataset.name).contains(name_contains.lower())
+ )
+ if type is not None:
+ stm = stm.where(Dataset.type == type)
+
+ res = await db.execute(stm)
+ dataset_list = res.scalars().all()
+ await db.close()
+
+ return dataset_list
+
+
+ @router.get("/job/", response_model=list[ApplyWorkflowRead])
+ async def monitor_job(
+ id: Optional[int] = None,
+ project_id: Optional[int] = None,
+ input_dataset_id: Optional[int] = None,
+ output_dataset_id: Optional[int] = None,
+ workflow_id: Optional[int] = None,
+ status: Optional[JobStatusType] = None,
+ start_timestamp_min: Optional[datetime] = None,
+ start_timestamp_max: Optional[datetime] = None,
+ end_timestamp_min: Optional[datetime] = None,
+ end_timestamp_max: Optional[datetime] = None,
+ user: User = Depends(current_active_superuser),
+ db: AsyncSession = Depends(get_db),
+ ) -> list[ApplyWorkflowRead]:
+
+ stm = select(ApplyWorkflow)
+
+ if id is not None:
+ stm = stm.where(ApplyWorkflow.id == id)
+ if project_id is not None:
+ stm = stm.where(ApplyWorkflow.project_id == project_id)
+ if input_dataset_id is not None:
+ stm = stm.where(ApplyWorkflow.input_dataset_id == input_dataset_id)
+ if output_dataset_id is not None:
+ stm = stm.where(ApplyWorkflow.output_dataset_id == output_dataset_id)
+ if workflow_id is not None:
+ stm = stm.where(ApplyWorkflow.workflow_id == workflow_id)
+ if status is not None:
+ stm = stm.where(ApplyWorkflow.status == status)
+ if start_timestamp_min is not None:
+ stm = stm.where(ApplyWorkflow.start_timestamp >= start_timestamp_min)
+ if start_timestamp_max is not None:
+ stm = stm.where(ApplyWorkflow.start_timestamp <= start_timestamp_max)
+ if end_timestamp_min is not None:
+ stm = stm.where(ApplyWorkflow.end_timestamp >= end_timestamp_min)
+ if end_timestamp_max is not None:
+ stm = stm.where(ApplyWorkflow.end_timestamp <= end_timestamp_max)
+
+ res = await db.execute(stm)
+ job_list = res.scalars().all()
+ await db.close()
+
+ return job_list
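The new monitoring router exposes superuser-only listing endpoints with optional query filters. A minimal sketch of filtering jobs by status and start time; the /monitoring prefix is added in main.py further down in this diff, and the URL and token are assumptions.

    import httpx

    r = httpx.get(
        "http://localhost:8000/monitoring/job/",
        params={"status": "failed", "start_timestamp_min": "2023-10-01T00:00:00"},
        headers={"Authorization": "Bearer SUPERUSER_TOKEN"},
    )
    failed_jobs = r.json()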
@@ -18,7 +18,6 @@ from ...db import DBSyncSession
  from ...db import get_db
  from ...db import get_sync_db
  from ...models import ApplyWorkflow
- from ...models import JobStatusType
  from ...models import LinkUserProject
  from ...models import Project
  from ...runner import submit_workflow
@@ -26,6 +25,7 @@ from ...runner import validate_workflow_compatibility
  from ...runner.common import set_start_and_last_task_index
  from ...schemas import ApplyWorkflowCreate
  from ...schemas import ApplyWorkflowRead
+ from ...schemas import JobStatusType
  from ...schemas import ProjectCreate
  from ...schemas import ProjectRead
  from ...schemas import ProjectUpdate
@@ -302,10 +302,13 @@ async def apply_workflow(
  input_dataset_id=input_dataset_id,
  output_dataset_id=output_dataset_id,
  workflow_id=workflow_id,
+ user_email=user.email,
+ input_dataset_dump=input_dataset.dict(),
+ output_dataset_dump=output_dataset.dict(),
  workflow_dump=dict(
  workflow.dict(exclude={"task_list"}),
  task_list=[
- dict(wf_task.task.dict(exclude={"task"}), task=wf_task.dict())
+ dict(wf_task.dict(exclude={"task"}), task=wf_task.task.dict())
  for wf_task in workflow.task_list
  ],
  ),
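The one-line fix above swaps the two dicts, so that each task_list entry is now the WorkflowTask dump with the nested Task dump under its "task" key. A sketch of the intended shape (field names abridged, values purely illustrative):

    entry = {
        "id": 5,          # WorkflowTask columns (order, args, meta, ...)
        "order": 0,
        "task": {         # nested Task, now correctly placed under "task"
            "id": 2,
            "name": "example-task",
        },
    }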
@@ -23,7 +23,6 @@ from ....logger import close_logger
  from ....logger import set_logger
  from ...db import AsyncSession
  from ...db import get_db
- from ...models import ApplyWorkflow
  from ...models import Task
  from ...models import Workflow
  from ...schemas import WorkflowCreate
@@ -186,19 +185,6 @@ async def delete_workflow(
  project_id=project_id, workflow_id=workflow_id, user_id=user.id, db=db
  )

- # Check that no ApplyWorkflow is in relationship with the current Workflow
- stm = select(ApplyWorkflow).where(ApplyWorkflow.workflow_id == workflow_id)
- res = await db.execute(stm)
- job = res.scalars().first()
- if job:
- raise HTTPException(
- status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
- detail=(
- f"Cannot remove workflow {workflow_id}: "
- f"it's still linked to job {job.id}."
- ),
- )
-
  await db.delete(workflow)
  await db.commit()

@@ -10,6 +10,7 @@ from sqlmodel import SQLModel

  from ..schemas.dataset import _DatasetBase
  from ..schemas.dataset import _ResourceBase
+ from .job import ApplyWorkflow


  class Resource(_ResourceBase, SQLModel, table=True):
@@ -37,6 +38,19 @@ class Dataset(_DatasetBase, SQLModel, table=True):

  id: Optional[int] = Field(default=None, primary_key=True)
  project_id: int = Field(foreign_key="project.id")
+
+ list_jobs_input: list[ApplyWorkflow] = Relationship( # noqa: F821
+ sa_relationship_kwargs=dict(
+ lazy="selectin",
+ primaryjoin="ApplyWorkflow.input_dataset_id==Dataset.id",
+ )
+ )
+ list_jobs_output: list[ApplyWorkflow] = Relationship( # noqa: F821
+ sa_relationship_kwargs=dict(
+ lazy="selectin",
+ primaryjoin="ApplyWorkflow.output_dataset_id==Dataset.id",
+ )
+ )
  resource_list: list[Resource] = Relationship(
  sa_relationship_kwargs={
  "lazy": "selectin",
@@ -45,6 +59,7 @@ class Dataset(_DatasetBase, SQLModel, table=True):
  "cascade": "all, delete-orphan",
  }
  )
+
  meta: dict[str, Any] = Field(sa_column=Column(JSON), default={})
  history: list[dict[str, Any]] = Field(
  sa_column=Column(JSON, server_default="[]", nullable=False)
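With the two new relationships, code that holds a Dataset instance can see which jobs reference it as input or output. A hypothetical usage sketch (the dataset variable is assumed to be a loaded Dataset row):

    jobs_in = dataset.list_jobs_input    # jobs with input_dataset_id == dataset.id
    jobs_out = dataset.list_jobs_output  # jobs with output_dataset_id == dataset.id
    if jobs_in or jobs_out:
        print(
            f"Dataset {dataset.id} is referenced by "
            f"{len(jobs_in) + len(jobs_out)} job(s)"
        )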
@@ -1,5 +1,4 @@
  from datetime import datetime
- from enum import Enum
  from typing import Any
  from typing import Optional

@@ -7,38 +6,11 @@ from sqlalchemy import Column
  from sqlalchemy.types import DateTime
  from sqlalchemy.types import JSON
  from sqlmodel import Field
- from sqlmodel import Relationship
  from sqlmodel import SQLModel

  from ...utils import get_timestamp
+ from ..schemas import JobStatusType
  from ..schemas.applyworkflow import _ApplyWorkflowBase
- from .dataset import Dataset
- from .workflow import Workflow
-
-
- class JobStatusType(str, Enum):
- """
- Define the job status available
-
- Attributes:
- SUBMITTED:
- The workflow has been applied but not yet scheduled with an
- executor. In this phase, due diligence takes place, such as
- creating working directory, assemblying arguments, etc.
- RUNNING:
- The workflow was scheduled with an executor. Note that it might not
- yet be running within the executor, e.g., jobs could still be
- pending within a SLURM executor.
- DONE:
- The workflow was applied successfully
- FAILED:
- The workflow terminated with an error.
- """
-
- SUBMITTED = "submitted"
- RUNNING = "running"
- DONE = "done"
- FAILED = "failed"


  class ApplyWorkflow(_ApplyWorkflowBase, SQLModel, table=True):
@@ -87,37 +59,36 @@ class ApplyWorkflow(_ApplyWorkflowBase, SQLModel, table=True):
  arbitrary_types_allowed = True

  id: Optional[int] = Field(default=None, primary_key=True)
- project_id: int = Field(foreign_key="project.id")
- input_dataset_id: int = Field(foreign_key="dataset.id")
- output_dataset_id: int = Field(foreign_key="dataset.id")
- workflow_id: int = Field(foreign_key="workflow.id")
- working_dir: Optional[str]
- working_dir_user: Optional[str]
- first_task_index: int
- last_task_index: int

- input_dataset: Dataset = Relationship(
- sa_relationship_kwargs=dict(
- lazy="selectin",
- primaryjoin="ApplyWorkflow.input_dataset_id==Dataset.id",
- )
+ project_id: Optional[int] = Field(foreign_key="project.id")
+
+ workflow_id: Optional[int] = Field(foreign_key="workflow.id")
+
+ input_dataset_id: Optional[int] = Field(foreign_key="dataset.id")
+ output_dataset_id: Optional[int] = Field(foreign_key="dataset.id")
+
+ user_email: str = Field(nullable=False)
+ input_dataset_dump: dict[str, Any] = Field(
+ sa_column=Column(JSON, nullable=False)
+ )
+ output_dataset_dump: dict[str, Any] = Field(
+ sa_column=Column(JSON, nullable=False)
  )
- output_dataset: Dataset = Relationship(
- sa_relationship_kwargs=dict(
- lazy="selectin",
- primaryjoin="ApplyWorkflow.output_dataset_id==Dataset.id",
- )
+ workflow_dump: Optional[dict[str, Any]] = Field(
+ sa_column=Column(JSON, nullable=True)
  )
- workflow: Workflow = Relationship()

- workflow_dump: Optional[dict[str, Any]] = Field(sa_column=Column(JSON))
+ working_dir: Optional[str]
+ working_dir_user: Optional[str]
+ first_task_index: int
+ last_task_index: int

  start_timestamp: datetime = Field(
  default_factory=get_timestamp,
- sa_column=Column(DateTime(timezone=True)),
+ sa_column=Column(DateTime(timezone=True), nullable=False),
  )
  end_timestamp: Optional[datetime] = Field(
  default=None, sa_column=Column(DateTime(timezone=True))
  )
- status: JobStatusType = JobStatusType.SUBMITTED
+ status: str = JobStatusType.SUBMITTED
  log: Optional[str] = None
@@ -37,9 +37,8 @@ class Project(_ProjectBase, SQLModel, table=True):
  },
  )

- job_list: list[ApplyWorkflow] = Relationship( # noqa
+ job_list: list[ApplyWorkflow] = Relationship(
  sa_relationship_kwargs={
  "lazy": "selectin",
- "cascade": "all, delete-orphan",
- },
+ }
  )
@@ -13,6 +13,7 @@ from sqlmodel import SQLModel
  from ..db import AsyncSession
  from ..schemas.workflow import _WorkflowBase
  from ..schemas.workflow import _WorkflowTaskBase
+ from .job import ApplyWorkflow
  from .task import Task

@@ -106,7 +107,7 @@ class Workflow(_WorkflowBase, SQLModel, table=True):
  id: Optional[int] = Field(default=None, primary_key=True)
  project_id: int = Field(foreign_key="project.id")

- task_list: list["WorkflowTask"] = Relationship(
+ task_list: list[WorkflowTask] = Relationship(
  sa_relationship_kwargs=dict(
  lazy="selectin",
  order_by="WorkflowTask.order",
@@ -114,6 +115,9 @@ class Workflow(_WorkflowBase, SQLModel, table=True):
  cascade="all, delete-orphan",
  ),
  )
+ job_list: list[ApplyWorkflow] = Relationship(
+ sa_relationship_kwargs={"lazy": "selectin"}
+ )

  async def insert_task(
  self,
@@ -30,9 +30,9 @@ from ...utils import get_timestamp
  from ..db import DB
  from ..models import ApplyWorkflow
  from ..models import Dataset
- from ..models import JobStatusType
  from ..models import Workflow
  from ..models import WorkflowTask
+ from ..schemas import JobStatusType
  from ._local import process_workflow as local_process_workflow
  from .common import close_job_logger
  from .common import JobExecutionError
@@ -187,6 +187,22 @@ async def submit_workflow(
  job.status = JobStatusType.RUNNING
  db_sync.merge(job)
  db_sync.commit()
+
+ # After Session.commit() is called, either explicitly or when using a
+ # context manager, all objects associated with the Session are expired.
+ # https://docs.sqlalchemy.org/en/14/orm/
+ # session_basics.html#opening-and-closing-a-session
+ # https://docs.sqlalchemy.org/en/14/orm/
+ # session_state_management.html#refreshing-expiring
+
+ # See issue #928:
+ # https://github.com/fractal-analytics-platform/
+ # fractal-server/issues/928
+
+ db_sync.refresh(input_dataset)
+ db_sync.refresh(output_dataset)
+ db_sync.refresh(workflow)
+
  # Write logs
  logger_name = f"WF{workflow_id}_job{job_id}"
  log_file_path = WORKFLOW_DIR / "workflow.log"
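The comments above point at SQLAlchemy's expire-on-commit behavior; the added refresh() calls reload the three objects so that later attribute access does not hit an expired instance. A generic, self-contained SQLAlchemy sketch (not fractal-server code) of the same pattern:

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()

    class Item(Base):
        __tablename__ = "item"
        id = Column(Integer, primary_key=True)
        name = Column(String)

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)

    with Session(engine) as session:
        item = Item(name="demo")
        session.add(item)
        session.commit()       # every instance attached to the session is now expired
        session.refresh(item)  # re-SELECTs the row and repopulates item's attributes
        print(item.name)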
@@ -3,6 +3,7 @@ Schemas for API request/response bodies
  """
  from .applyworkflow import ApplyWorkflowCreate # noqa: F401
  from .applyworkflow import ApplyWorkflowRead # noqa: F401
+ from .applyworkflow import JobStatusType # noqa: F401
  from .dataset import DatasetCreate # noqa: F401
  from .dataset import DatasetRead # noqa: F401
  from .dataset import DatasetStatusRead # noqa: F401
@@ -1,4 +1,5 @@
  from datetime import datetime
+ from enum import Enum
  from typing import Any
  from typing import Optional

@@ -14,6 +15,31 @@
  )


+ class JobStatusType(str, Enum):
+ """
+ Define the available job statuses
+
+ Attributes:
+ SUBMITTED:
+ The workflow has been applied but not yet scheduled with an
+ executor. In this phase, due diligence takes place, such as
+ creating working directory, assemblying arguments, etc.
+ RUNNING:
+ The workflow was scheduled with an executor. Note that it might not
+ yet be running within the executor, e.g., jobs could still be
+ pending within a SLURM executor.
+ DONE:
+ The workflow was applied successfully
+ FAILED:
+ The workflow terminated with an error.
+ """
+
+ SUBMITTED = "submitted"
+ RUNNING = "running"
+ DONE = "done"
+ FAILED = "failed"
+
+
  class _ApplyWorkflowBase(BaseModel):
  """
  Base class for `ApplyWorkflow`.
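JobStatusType now lives in the schemas package (and is re-exported from fractal_server.app.schemas, see the __init__.py hunk above) instead of the models package. Because it is a str-backed enum, members still compare equal to their plain-string values; a minimal sketch:

    from fractal_server.app.schemas import JobStatusType

    assert JobStatusType.DONE == "done"
    assert JobStatusType("failed") is JobStatusType.FAILED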
@@ -99,14 +125,17 @@ class ApplyWorkflowRead(_ApplyWorkflowBase):

  id: int
  project_id: int
+ user_email: str
  workflow_id: int
+ workflow_dump: Optional[dict[str, Any]]
  input_dataset_id: int
+ input_dataset_dump: Optional[dict[str, Any]]
  output_dataset_id: int
+ output_dataset_dump: Optional[dict[str, Any]]
  start_timestamp: datetime
  end_timestamp: Optional[datetime]
  status: str
  log: Optional[str]
- workflow_dump: Optional[dict[str, Any]]
  working_dir: Optional[str]
  working_dir_user: Optional[str]
  first_task_index: Optional[int]
@@ -4,7 +4,9 @@ from pydantic import BaseModel
  from pydantic import validator

  from ._validators import valstr
+ from .applyworkflow import ApplyWorkflowRead
  from .dataset import DatasetRead
+ from .workflow import WorkflowRead


  __all__ = (
@@ -47,6 +49,8 @@ class ProjectRead(_ProjectBase):

  id: int
  dataset_list: list[DatasetRead] = []
+ workflow_list: list[WorkflowRead] = []
+ job_list: list[ApplyWorkflowRead] = []


  class ProjectUpdate(_ProjectBase):
@@ -48,10 +48,15 @@ def collect_routers(app: FastAPI) -> None:
  """
  from .app.api import router_default
  from .app.api import router_v1
+ from .app.api import router_monitoring
+
  from .app.security import auth_router

  app.include_router(router_default, prefix="/api")
  app.include_router(router_v1, prefix="/api/v1")
+ app.include_router(
+ router_monitoring, prefix="/monitoring", tags=["Monitoring"]
+ )
  app.include_router(auth_router, prefix="/auth", tags=["auth"])

@@ -0,0 +1,86 @@
+ """Add dumps to ApplyWorkflow
+
+ Revision ID: 84bf0fffde30
+ Revises: 99ea79d9e5d2
+ Create Date: 2023-10-26 16:11:44.061971
+
+ """
+ import sqlalchemy as sa
+ from alembic import op
+
+ # revision identifiers, used by Alembic.
+ revision = "84bf0fffde30"
+ down_revision = "99ea79d9e5d2"
+ branch_labels = None
+ depends_on = None
+
+
+ def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ with op.batch_alter_table("applyworkflow", schema=None) as batch_op:
+ batch_op.add_column(
+ sa.Column(
+ "user_email",
+ sa.String(),
+ server_default="__UNDEFINED__",
+ nullable=False,
+ )
+ )
+ batch_op.add_column(
+ sa.Column(
+ "input_dataset_dump",
+ sa.JSON(),
+ server_default="{}",
+ nullable=False,
+ )
+ )
+ batch_op.add_column(
+ sa.Column(
+ "output_dataset_dump",
+ sa.JSON(),
+ server_default="{}",
+ nullable=False,
+ )
+ )
+
+ batch_op.alter_column(
+ "project_id", existing_type=sa.INTEGER(), nullable=True
+ )
+ batch_op.alter_column(
+ "workflow_id", existing_type=sa.INTEGER(), nullable=True
+ )
+ batch_op.alter_column(
+ "input_dataset_id", existing_type=sa.INTEGER(), nullable=True
+ )
+ batch_op.alter_column(
+ "output_dataset_id", existing_type=sa.INTEGER(), nullable=True
+ )
+
+ with op.batch_alter_table("applyworkflow", schema=None) as batch_op:
+ batch_op.alter_column("user_email", server_default=None)
+ batch_op.alter_column("input_dataset_dump", server_default=None)
+ batch_op.alter_column("output_dataset_dump", server_default=None)
+
+ # ### end Alembic commands ###
+
+
+ def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ with op.batch_alter_table("applyworkflow", schema=None) as batch_op:
+ batch_op.alter_column(
+ "output_dataset_id", existing_type=sa.INTEGER(), nullable=False
+ )
+ batch_op.alter_column(
+ "input_dataset_id", existing_type=sa.INTEGER(), nullable=False
+ )
+ batch_op.alter_column(
+ "workflow_id", existing_type=sa.INTEGER(), nullable=False
+ )
+ batch_op.alter_column(
+ "project_id", existing_type=sa.INTEGER(), nullable=False
+ )
+ batch_op.drop_column("output_dataset_dump")
+ batch_op.drop_column("input_dataset_dump")
+ batch_op.drop_column("user_email")
+
+ # ### end Alembic commands ###
@@ -0,0 +1,35 @@
+ """Make ApplyWorkflow.start_timestamp not nullable
+
+ Revision ID: e75cac726012
+ Revises: 84bf0fffde30
+ Create Date: 2023-10-30 15:51:18.808789
+
+ """
+ import sqlalchemy as sa
+ from alembic import op
+
+ # revision identifiers, used by Alembic.
+ revision = "e75cac726012"
+ down_revision = "84bf0fffde30"
+ branch_labels = None
+ depends_on = None
+
+
+ def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ with op.batch_alter_table("applyworkflow", schema=None) as batch_op:
+ batch_op.alter_column(
+ "start_timestamp", existing_type=sa.DATETIME(), nullable=False
+ )
+
+ # ### end Alembic commands ###
+
+
+ def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ with op.batch_alter_table("applyworkflow", schema=None) as batch_op:
+ batch_op.alter_column(
+ "start_timestamp", existing_type=sa.DATETIME(), nullable=True
+ )
+
+ # ### end Alembic commands ###
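A hypothetical sketch of applying the two new revisions with Alembic's Python API; deployments may instead use the project's own CLI, and the path to alembic.ini is an assumption.

    from alembic import command
    from alembic.config import Config

    cfg = Config("alembic.ini")           # the package ships an alembic.ini (see file list above)
    command.upgrade(cfg, "e75cac726012")  # applies 84bf0fffde30, then e75cac726012
    # command.downgrade(cfg, "99ea79d9e5d2") would revert both revisions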
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "fractal-server"
- version = "1.3.14a0"
+ version = "1.4.0a1"
  description = "Server component of the Fractal analytics platform"
  authors = [
  "Jacopo Nespolo <jacopo.nespolo@exact-lab.it>",
@@ -48,7 +48,7 @@ gunicorn = ["gunicorn"]
  asgi-lifespan = "^2"
  pytest = "^7.2"
  httpx = "^0.23"
- devtools = "^0.10"
+ devtools = "^0.12"
  pytest-asyncio = "^0.20"
  bumpver = "^2022.1120"
  pre-commit = "^2.19"
@@ -82,7 +82,7 @@ filterwarnings = [
  ]

  [tool.bumpver]
- current_version = "1.3.14a0"
+ current_version = "1.4.0a1"
  version_pattern = "MAJOR.MINOR.PATCH[PYTAGNUM]"
  commit_message = "bump version {old_version} -> {new_version}"
  commit = true
@@ -107,5 +107,5 @@ relative_files = true
  omit = ["tests/*"]

  [[tool.mypy.overrides]]
- module = ["devtools", "uvicorn"]
+ module = ["devtools", "uvicorn", "pytest", "asgi_lifespan", "asyncpg"]
  ignore_missing_imports = true
@@ -1 +0,0 @@
- __VERSION__ = "1.3.14a0"