fractal-server 2.4.1__py3-none-any.whl → 2.5.0a0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (34)
  1. fractal_server/__init__.py +1 -1
  2. fractal_server/__main__.py +3 -4
  3. fractal_server/app/db/__init__.py +4 -1
  4. fractal_server/app/models/v1/task.py +0 -5
  5. fractal_server/app/models/v2/workflowtask.py +2 -10
  6. fractal_server/app/routes/admin/v2.py +0 -30
  7. fractal_server/app/routes/api/v2/__init__.py +0 -4
  8. fractal_server/app/routes/api/v2/_aux_functions.py +11 -46
  9. fractal_server/app/routes/api/v2/workflow.py +23 -54
  10. fractal_server/app/routes/api/v2/workflowtask.py +9 -33
  11. fractal_server/app/runner/executors/slurm/sudo/_subprocess_run_as_user.py +1 -1
  12. fractal_server/app/runner/executors/slurm/sudo/executor.py +1 -1
  13. fractal_server/app/runner/v2/__init__.py +1 -4
  14. fractal_server/app/runner/v2/_slurm_common/get_slurm_config.py +1 -4
  15. fractal_server/app/runner/v2/handle_failed_job.py +2 -9
  16. fractal_server/app/runner/v2/runner.py +42 -70
  17. fractal_server/app/runner/v2/runner_functions.py +0 -58
  18. fractal_server/app/runner/v2/runner_functions_low_level.py +7 -21
  19. fractal_server/app/schemas/v2/__init__.py +0 -1
  20. fractal_server/app/schemas/v2/dumps.py +2 -23
  21. fractal_server/app/schemas/v2/task.py +0 -5
  22. fractal_server/app/schemas/v2/workflowtask.py +4 -29
  23. fractal_server/app/security/__init__.py +22 -15
  24. fractal_server/migrations/env.py +4 -7
  25. fractal_server/migrations/naming_convention.py +7 -0
  26. fractal_server/migrations/versions/091b01f51f88_add_usergroup_and_linkusergroup_table.py +1 -1
  27. fractal_server/migrations/versions/501961cfcd85_remove_link_between_v1_and_v2_tasks_.py +97 -0
  28. {fractal_server-2.4.1.dist-info → fractal_server-2.5.0a0.dist-info}/METADATA +1 -1
  29. {fractal_server-2.4.1.dist-info → fractal_server-2.5.0a0.dist-info}/RECORD +32 -32
  30. fractal_server/app/routes/api/v2/task_legacy.py +0 -59
  31. fractal_server/app/runner/v2/v1_compat.py +0 -31
  32. {fractal_server-2.4.1.dist-info → fractal_server-2.5.0a0.dist-info}/LICENSE +0 -0
  33. {fractal_server-2.4.1.dist-info → fractal_server-2.5.0a0.dist-info}/WHEEL +0 -0
  34. {fractal_server-2.4.1.dist-info → fractal_server-2.5.0a0.dist-info}/entry_points.txt +0 -0
fractal_server/app/runner/v2/runner_functions.py

@@ -16,8 +16,6 @@ from .merge_outputs import merge_outputs
 from .runner_functions_low_level import run_single_task
 from .task_interface import InitTaskOutput
 from .task_interface import TaskOutput
-from .v1_compat import convert_v2_args_into_v1
-from fractal_server.app.models.v1 import Task as TaskV1
 from fractal_server.app.models.v2 import TaskV2
 from fractal_server.app.models.v2 import WorkflowTaskV2
 from fractal_server.app.runner.components import _COMPONENT_KEY_
@@ -28,7 +26,6 @@ __all__ = [
     "run_v2_task_non_parallel",
     "run_v2_task_parallel",
     "run_v2_task_compound",
-    "run_v1_task_parallel",
 ]
 
 MAX_PARALLELIZATION_LIST_SIZE = 20_000
@@ -317,58 +314,3 @@ def run_v2_task_compound(
 
     merged_output = merge_outputs(outputs)
     return merged_output
-
-
-def run_v1_task_parallel(
-    *,
-    images: list[dict[str, Any]],
-    task_legacy: TaskV1,
-    wftask: WorkflowTaskV2,
-    executor: Executor,
-    workflow_dir_local: Path,
-    workflow_dir_remote: Optional[Path] = None,
-    logger_name: Optional[str] = None,
-    submit_setup_call: Callable = no_op_submit_setup_call,
-) -> TaskOutput:
-
-    _check_parallelization_list_size(images)
-
-    executor_options = _get_executor_options(
-        wftask=wftask,
-        workflow_dir_local=workflow_dir_local,
-        workflow_dir_remote=workflow_dir_remote,
-        submit_setup_call=submit_setup_call,
-        which_type="parallel",
-    )
-
-    list_function_kwargs = []
-    for ind, image in enumerate(images):
-        list_function_kwargs.append(
-            convert_v2_args_into_v1(
-                kwargs_v2=dict(
-                    zarr_url=image["zarr_url"],
-                    **(wftask.args_parallel or {}),
-                ),
-                parallelization_level=task_legacy.parallelization_level,
-            ),
-        )
-        list_function_kwargs[-1][_COMPONENT_KEY_] = _index_to_component(ind)
-
-    results_iterator = executor.map(
-        functools.partial(
-            run_single_task,
-            wftask=wftask,
-            command=task_legacy.command,
-            workflow_dir_local=workflow_dir_local,
-            workflow_dir_remote=workflow_dir_remote,
-            is_task_v1=True,
-        ),
-        list_function_kwargs,
-        **executor_options,
-    )
-    # Explicitly iterate over the whole list, so that all futures are waited
-    list(results_iterator)
-
-    # Ignore any output metadata for V1 tasks, and return an empty object
-    out = TaskOutput()
-    return out
fractal_server/app/runner/v2/runner_functions_low_level.py

@@ -61,7 +61,6 @@ def run_single_task(
     workflow_dir_local: Path,
     workflow_dir_remote: Optional[Path] = None,
     logger_name: Optional[str] = None,
-    is_task_v1: bool = False,
 ) -> dict[str, Any]:
     """
     Runs within an executor.
@@ -73,10 +72,7 @@
     if not workflow_dir_remote:
         workflow_dir_remote = workflow_dir_local
 
-    if is_task_v1:
-        task_name = wftask.task_legacy.name
-    else:
-        task_name = wftask.task.name
+    task_name = wftask.task.name
 
     component = args.pop(_COMPONENT_KEY_, None)
     task_files = get_task_file_paths(
@@ -92,18 +88,11 @@
         json.dump(args, f, indent=2)
 
     # Assemble full command
-    if is_task_v1:
-        full_command = (
-            f"{command} "
-            f"--json {task_files.args.as_posix()} "
-            f"--metadata-out {task_files.metadiff.as_posix()}"
-        )
-    else:
-        full_command = (
-            f"{command} "
-            f"--args-json {task_files.args.as_posix()} "
-            f"--out-json {task_files.metadiff.as_posix()}"
-        )
+    full_command = (
+        f"{command} "
+        f"--args-json {task_files.args.as_posix()} "
+        f"--out-json {task_files.metadiff.as_posix()}"
+    )
 
     try:
         _call_command_wrapper(
@@ -113,10 +102,7 @@
     except TaskExecutionError as e:
         e.workflow_task_order = wftask.order
         e.workflow_task_id = wftask.id
-        if wftask.is_legacy_task:
-            e.task_name = wftask.task_legacy.name
-        else:
-            e.task_name = wftask.task.name
+        e.task_name = wftask.task.name
         raise e
 
     try:
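
Note: with the v1 branch removed, run_single_task always assembles the v2 calling convention (--args-json plus --out-json). A minimal sketch of the resulting command string, using a hypothetical task command and job paths (not taken from this package):

    from pathlib import Path

    # Hypothetical values, for illustration only
    command = "/opt/envs/task/bin/python /opt/tasks/my_task.py"
    args_file = Path("/data/job/0_my_task/args.json")
    metadiff_file = Path("/data/job/0_my_task/metadiff.json")

    full_command = (
        f"{command} "
        f"--args-json {args_file.as_posix()} "
        f"--out-json {metadiff_file.as_posix()}"
    )
    # -> /opt/envs/task/bin/python /opt/tasks/my_task.py
    #        --args-json /data/job/0_my_task/args.json
    #        --out-json /data/job/0_my_task/metadiff.json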
fractal_server/app/schemas/v2/__init__.py

@@ -20,7 +20,6 @@ from .project import ProjectUpdateV2  # noqa F401
 from .task import TaskCreateV2  # noqa F401
 from .task import TaskExportV2  # noqa F401
 from .task import TaskImportV2  # noqa F401
-from .task import TaskLegacyReadV2  # noqa F401
 from .task import TaskReadV2  # noqa F401
 from .task import TaskUpdateV2  # noqa F401
 from .task_collection import CollectionStateReadV2  # noqa F401
fractal_server/app/schemas/v2/dumps.py

@@ -12,9 +12,7 @@ from typing import Optional
 
 from pydantic import BaseModel
 from pydantic import Extra
-from pydantic import root_validator
 
-from fractal_server.app.schemas.v1.dumps import TaskDumpV1
 from fractal_server.images import Filters
 
 
@@ -45,29 +43,10 @@ class WorkflowTaskDumpV2(BaseModel):
     workflow_id: int
     order: Optional[int]
 
-    is_legacy_task: bool
-
     input_filters: Filters
 
-    task_id: Optional[int]
-    task: Optional[TaskDumpV2]
-    task_legacy_id: Optional[int]
-    task_legacy: Optional[TaskDumpV1]
-
-    # Validators
-    @root_validator
-    def task_v1_or_v2(cls, values):
-        v1 = values.get("task_legacy_id")
-        v2 = values.get("task_id")
-        if ((v1 is not None) and (v2 is not None)) or (
-            (v1 is None) and (v2 is None)
-        ):
-            message = "both" if (v1 and v2) else "none"
-            raise ValueError(
-                "One and only one must be provided between "
-                f"'task_legacy_id' and 'task_id' (you provided {message})"
-            )
-        return values
+    task_id: int
+    task: TaskDumpV2
 
 
 class WorkflowDumpV2(BaseModel, extra=Extra.forbid):
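
Note: WorkflowTaskDumpV2 now requires task_id and task, so the either-v1-or-v2 root_validator is obsolete; the type annotations alone reject a dump without a v2 task. A short sketch of the stricter behavior (assuming only the fields shown in this hunk; other fields elided):

    from pydantic import ValidationError

    from fractal_server.app.schemas.v2.dumps import WorkflowTaskDumpV2
    from fractal_server.images import Filters

    try:
        # No task_id/task provided: in 2.4.1 the root_validator raised a
        # custom error; in 2.5.0a0 plain required-field validation fails.
        WorkflowTaskDumpV2(id=1, workflow_id=1, input_filters=Filters())
    except ValidationError as err:
        print(err)  # field required: task_id, task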
fractal_server/app/schemas/v2/task.py

@@ -11,7 +11,6 @@ from pydantic import validator
 
 from .._validators import valdictkeys
 from .._validators import valstr
-from ..v1.task import TaskReadV1
 
 
 class TaskCreateV2(BaseModel, extra=Extra.forbid):
@@ -101,10 +100,6 @@ class TaskReadV2(BaseModel):
     output_types: dict[str, bool]
 
 
-class TaskLegacyReadV2(TaskReadV1):
-    is_v2_compatible: bool
-
-
 class TaskUpdateV2(BaseModel):
 
     name: Optional[str]
fractal_server/app/schemas/v2/workflowtask.py

@@ -5,16 +5,12 @@ from typing import Optional
 from pydantic import BaseModel
 from pydantic import Extra
 from pydantic import Field
-from pydantic import root_validator
 from pydantic import validator
 
 from .._validators import valdictkeys
 from .._validators import valint
-from ..v1.task import TaskExportV1
-from ..v1.task import TaskImportV1
 from .task import TaskExportV2
 from .task import TaskImportV2
-from .task import TaskLegacyReadV2
 from .task import TaskReadV2
 from fractal_server.images import Filters
 
@@ -49,8 +45,6 @@ class WorkflowTaskCreateV2(BaseModel, extra=Extra.forbid):
     order: Optional[int]
     input_filters: Filters = Field(default_factory=Filters)
 
-    is_legacy_task: bool = False
-
     # Validators
     _meta_non_parallel = validator("meta_non_parallel", allow_reuse=True)(
         valdictkeys("meta_non_parallel")
@@ -88,18 +82,6 @@ class WorkflowTaskCreateV2(BaseModel, extra=Extra.forbid):
         )
         return value
 
-    @root_validator
-    def validate_legacy_task(cls, values):
-        if values["is_legacy_task"] and (
-            values.get("meta_non_parallel") is not None
-            or values.get("args_non_parallel") is not None
-        ):
-            raise ValueError(
-                "If Task is legacy, 'args_non_parallel' and 'meta_non_parallel"
-                "must be None"
-            )
-        return values
-
 
 class WorkflowTaskReadV2(BaseModel):
 
@@ -115,12 +97,9 @@ class WorkflowTaskReadV2(BaseModel):
 
     input_filters: Filters
 
-    is_legacy_task: bool
     task_type: str
-    task_id: Optional[int]
-    task: Optional[TaskReadV2]
-    task_legacy_id: Optional[int]
-    task_legacy: Optional[TaskLegacyReadV2]
+    task_id: int
+    task: TaskReadV2
 
 
 class WorkflowTaskUpdateV2(BaseModel):
@@ -177,9 +156,7 @@ class WorkflowTaskImportV2(BaseModel):
 
     input_filters: Optional[Filters] = None
 
-    is_legacy_task: bool = False
-    task: Optional[TaskImportV2] = None
-    task_legacy: Optional[TaskImportV1] = None
+    task: TaskImportV2
 
     _meta_non_parallel = validator("meta_non_parallel", allow_reuse=True)(
         valdictkeys("meta_non_parallel")
@@ -203,6 +180,4 @@ class WorkflowTaskExportV2(BaseModel):
     args_parallel: Optional[dict[str, Any]] = None
     input_filters: Filters = Field(default_factory=Filters)
 
-    is_legacy_task: bool = False
-    task: Optional[TaskExportV2]
-    task_legacy: Optional[TaskExportV1]
+    task: TaskExportV2
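
Note: WorkflowTaskImportV2.task is likewise now mandatory, and the task_legacy / is_legacy_task fields are gone, so exported v1-flavored workflow tasks can no longer be imported. A hypothetical minimal import payload under the 2.5.0a0 shape (the exact fields of TaskImportV2 are not shown in this diff; the "source" key below is an assumption, for illustration only):

    # Illustrative payload shape only
    wftask_import = dict(
        task=dict(source="pip_remote:fractal_tasks_core:1.0.2::my_task"),
        args_parallel=dict(level=0),
    )
    # Omitting the "task" key entirely now fails validation, since
    # `task: TaskImportV2` no longer has a default.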
fractal_server/app/security/__init__.py

@@ -55,9 +55,9 @@ from fractal_server.app.models import OAuthAccount
 from fractal_server.app.models import UserGroup
 from fractal_server.app.models import UserOAuth
 from fractal_server.app.schemas.user import UserCreate
-from fractal_server.logger import get_logger
+from fractal_server.logger import set_logger
 
-logger = get_logger(__name__)
+logger = set_logger(__name__)
 
 FRACTAL_DEFAULT_GROUP_NAME = "All"
 
@@ -264,9 +264,10 @@ async def _create_first_user(
         is_verified: `True` if the new user is verifie
         username:
     """
+    function_logger = set_logger("fractal_server.create_first_user")
+    function_logger.info(f"START _create_first_user, with email '{email}'")
     try:
         async with get_async_session_context() as session:
-
             if is_superuser is True:
                 # If a superuser already exists, exit
                 stm = select(UserOAuth).where(  # noqa
@@ -275,9 +276,9 @@ async def _create_first_user(
                 res = await session.execute(stm)
                 existing_superuser = res.scalars().first()
                 if existing_superuser is not None:
-                    logger.info(
-                        f"{existing_superuser.email} superuser already exists,"
-                        f" skip creation of {email}"
+                    function_logger.info(
+                        f"'{existing_superuser.email}' superuser already "
+                        f"exists, skip creation of '{email}'"
                     )
                     return None
 
@@ -292,15 +293,19 @@ async def _create_first_user(
             if username is not None:
                 kwargs["username"] = username
             user = await user_manager.create(UserCreate(**kwargs))
-            logger.info(f"User {user.email} created")
+            function_logger.info(f"User '{user.email}' created")
 
     except UserAlreadyExists:
-        logger.warning(f"User {email} already exists")
+        function_logger.warning(f"User '{email}' already exists")
+    finally:
+        function_logger.info(f"END _create_first_user, with email '{email}'")
 
 
 def _create_first_group():
-    logger.info(
-        f"START _create_first_group, with name {FRACTAL_DEFAULT_GROUP_NAME}"
+    function_logger = set_logger("fractal_server.create_first_group")
+
+    function_logger.info(
+        f"START _create_first_group, with name '{FRACTAL_DEFAULT_GROUP_NAME}'"
     )
     with next(get_sync_db()) as db:
         group_all = db.execute(select(UserGroup))
@@ -308,11 +313,13 @@ def _create_first_group():
            first_group = UserGroup(name=FRACTAL_DEFAULT_GROUP_NAME)
            db.add(first_group)
            db.commit()
-           logger.info(f"Created group {FRACTAL_DEFAULT_GROUP_NAME}")
+           function_logger.info(
+               f"Created group '{FRACTAL_DEFAULT_GROUP_NAME}'"
+           )
        else:
-           logger.info(
-               f"Group {FRACTAL_DEFAULT_GROUP_NAME} already exists, skip."
+           function_logger.info(
+               f"Group '{FRACTAL_DEFAULT_GROUP_NAME}' already exists, skip."
            )
-    logger.info(
-        f"END _create_first_group, with name {FRACTAL_DEFAULT_GROUP_NAME}"
+    function_logger.info(
+        f"END _create_first_group, with name '{FRACTAL_DEFAULT_GROUP_NAME}'"
    )
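
Note: beyond the get_logger → set_logger rename, _create_first_user and _create_first_group now log through their own named loggers, with paired START/END messages. A minimal sketch of the pattern (assuming set_logger(name) configures and returns a standard logging.Logger, as its usage here suggests):

    import logging

    def set_logger(name: str) -> logging.Logger:
        # Stand-in for fractal_server.logger.set_logger (assumption: the
        # real one also attaches handlers according to server settings).
        logger = logging.getLogger(name)
        logger.setLevel(logging.INFO)
        return logger

    function_logger = set_logger("fractal_server.create_first_group")
    function_logger.info("START _create_first_group, with name 'All'")
    # ... do the work ...
    function_logger.info("END _create_first_group, with name 'All'")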
fractal_server/migrations/env.py

@@ -7,6 +7,7 @@ from sqlmodel import SQLModel
 
 from fractal_server.app import models  # noqa
 from fractal_server.config import get_settings
+from fractal_server.migrations.naming_convention import NAMING_CONVENTION
 from fractal_server.syringe import Inject
 
 # this is the Alembic Config object, which provides
@@ -25,13 +26,7 @@ if config.config_file_name is not None:
 # from myapp import mymodel
 # target_metadata = mymodel.Base.metadata
 target_metadata = SQLModel.metadata
-target_metadata.naming_convention = {
-    "ix": "ix_%(column_0_label)s",
-    "uq": "uq_%(table_name)s_%(column_0_name)s",
-    "ck": "ck_%(table_name)s_`%(constraint_name)s`",
-    "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
-    "pk": "pk_%(table_name)s",
-}
+target_metadata.naming_convention = NAMING_CONVENTION
 
 # other values from the config, defined by the needs of env.py,
 # can be acquired:
@@ -58,6 +53,7 @@ def run_migrations_offline() -> None:
         target_metadata=target_metadata,
         literal_binds=True,
         dialect_opts={"paramstyle": "named"},
+        render_as_batch=True,
     )
 
     with context.begin_transaction():
@@ -68,6 +64,7 @@ def do_run_migrations(connection: Connection) -> None:
     context.configure(
         connection=connection,
         target_metadata=target_metadata,
+        render_as_batch=True,
     )
 
     with context.begin_transaction():
fractal_server/migrations/naming_convention.py (new file)

@@ -0,0 +1,7 @@
+NAMING_CONVENTION = {
+    "ix": "ix_%(column_0_label)s",
+    "uq": "uq_%(table_name)s_%(column_0_name)s",
+    "ck": "ck_%(table_name)s_`%(constraint_name)s`",
+    "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
+    "pk": "pk_%(table_name)s",
+}
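
Note: factoring the convention into its own module lets env.py and individual migrations import the same dict. A short sketch of how a SQLAlchemy naming convention produces deterministic constraint names (the table and column names below are invented for illustration):

    from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table

    from fractal_server.migrations.naming_convention import NAMING_CONVENTION

    metadata = MetaData(naming_convention=NAMING_CONVENTION)
    Table(
        "workflowtaskv2",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("task_legacy_id", Integer, ForeignKey("task.id")),
    )
    # At DDL time the FK constraint is named
    # "fk_workflowtaskv2_task_legacy_id_task", which is the name the
    # migration below drops by name on sqlite.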
fractal_server/migrations/versions/091b01f51f88_add_usergroup_and_linkusergroup_table.py

@@ -1,4 +1,4 @@
-"""Add_usergroup_and_linkusergroup_table
+"""Add_usergroup_and_linkusergroup_table
 
 Revision ID: 091b01f51f88
 Revises: 5bf02391cfef
fractal_server/migrations/versions/501961cfcd85_remove_link_between_v1_and_v2_tasks_.py (new file)

@@ -0,0 +1,97 @@
+"""Remove link between v1 and v2 tasks/workflowtasks tables
+
+Revision ID: d9a140db5d42
+Revises: 5bf02391cfef
+Create Date: 2024-09-09 14:15:34.415926
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+from fractal_server.migrations.naming_convention import NAMING_CONVENTION
+
+# revision identifiers, used by Alembic.
+revision = "d9a140db5d42"
+down_revision = "091b01f51f88"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+
+    with op.batch_alter_table("workflowtaskv2") as batch_op:
+        batch_op.alter_column(
+            "task_id", existing_type=sa.INTEGER(), nullable=False
+        )
+
+    # NOTE: in sqlite, this `drop_constraint` only works if
+    # `batch_alter_table` has a `naming_convention` set. Ref
+    # https://alembic.sqlalchemy.org/en/latest/batch.html#dropping-unnamed-or-named-foreign-key-constraints
+    with op.batch_alter_table(
+        "workflowtaskv2", naming_convention=NAMING_CONVENTION
+    ) as batch_op:
+        batch_op.drop_constraint(
+            "fk_workflowtaskv2_task_legacy_id_task", type_="foreignkey"
+        )
+
+    # NOTE: in sqlite, the `drop_index` command fails if the existing table
+    # has zero rows, while it succeeds if there are already some rows
+    if op.get_bind().dialect.name == "sqlite":
+        import sqlite3
+        import logging
+
+        logger = logging.getLogger("alembic.runtime.migration")
+        logger.warning(
+            f"Using sqlite, with {sqlite3.version=} and "
+            f"{sqlite3.sqlite_version=}"
+        )
+        logger.warning("Now drop index 'idx_workflowtaskv2_task_legacy_id'")
+        try:
+            with op.batch_alter_table("workflowtaskv2") as batch_op:
+                batch_op.drop_index("idx_workflowtaskv2_task_legacy_id")
+        except sa.exc.OperationalError:
+            logger.warning(
+                "Could not drop index; "
+                "this is expected, when the database is empty."
+            )
+            logger.warning("Continue.")
+
+    with op.batch_alter_table(
+        "workflowtaskv2", schema=None, naming_convention=NAMING_CONVENTION
+    ) as batch_op:
+        batch_op.drop_column("is_legacy_task")
+        batch_op.drop_column("task_legacy_id")
+
+    with op.batch_alter_table("task") as batch_op:
+        batch_op.drop_column("is_v2_compatible")
+
+
+def downgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("task", schema=None) as batch_op:
+        batch_op.add_column(
+            sa.Column(
+                "is_v2_compatible",
+                sa.BOOLEAN(),
+                server_default=sa.text("(false)"),
+                nullable=False,
+            )
+        )
+    with op.batch_alter_table("workflowtaskv2", schema=None) as batch_op:
+        batch_op.add_column(
+            sa.Column("task_legacy_id", sa.INTEGER(), nullable=True)
+        )
+        batch_op.add_column(
+            sa.Column("is_legacy_task", sa.BOOLEAN(), nullable=False)
+        )
+        batch_op.create_foreign_key(
+            "fk_workflowtaskv2_task_legacy_id_task",
+            "task",
+            ["task_legacy_id"],
+            ["id"],
+        )
+        batch_op.alter_column(
+            "task_id", existing_type=sa.INTEGER(), nullable=True
+        )
+
+    # ### end Alembic commands ###
{fractal_server-2.4.1.dist-info → fractal_server-2.5.0a0.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: fractal-server
-Version: 2.4.1
+Version: 2.5.0a0
 Summary: Server component of the Fractal analytics platform
 Home-page: https://github.com/fractal-analytics-platform/fractal-server
 License: BSD-3-Clause