fractal-server 2.11.0a10__py3-none-any.whl → 2.12.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. fractal_server/__init__.py +1 -1
  2. fractal_server/app/models/__init__.py +0 -2
  3. fractal_server/app/models/linkuserproject.py +0 -9
  4. fractal_server/app/models/v2/dataset.py +0 -4
  5. fractal_server/app/models/v2/workflowtask.py +0 -4
  6. fractal_server/app/routes/aux/_job.py +1 -3
  7. fractal_server/app/runner/executors/slurm/ssh/executor.py +9 -6
  8. fractal_server/app/runner/executors/slurm/sudo/executor.py +1 -5
  9. fractal_server/app/runner/filenames.py +0 -2
  10. fractal_server/app/runner/shutdown.py +3 -27
  11. fractal_server/app/schemas/_validators.py +0 -19
  12. fractal_server/config.py +1 -15
  13. fractal_server/main.py +1 -12
  14. fractal_server/migrations/versions/1eac13a26c83_drop_v1_tables.py +67 -0
  15. fractal_server/migrations/versions/af8673379a5c_drop_old_filter_columns.py +54 -0
  16. fractal_server/string_tools.py +0 -21
  17. fractal_server/tasks/utils.py +0 -28
  18. {fractal_server-2.11.0a10.dist-info → fractal_server-2.12.0.dist-info}/METADATA +1 -1
  19. {fractal_server-2.11.0a10.dist-info → fractal_server-2.12.0.dist-info}/RECORD +22 -66
  20. fractal_server/app/models/v1/__init__.py +0 -13
  21. fractal_server/app/models/v1/dataset.py +0 -71
  22. fractal_server/app/models/v1/job.py +0 -101
  23. fractal_server/app/models/v1/project.py +0 -29
  24. fractal_server/app/models/v1/state.py +0 -34
  25. fractal_server/app/models/v1/task.py +0 -85
  26. fractal_server/app/models/v1/workflow.py +0 -133
  27. fractal_server/app/routes/admin/v1.py +0 -377
  28. fractal_server/app/routes/api/v1/__init__.py +0 -26
  29. fractal_server/app/routes/api/v1/_aux_functions.py +0 -478
  30. fractal_server/app/routes/api/v1/dataset.py +0 -554
  31. fractal_server/app/routes/api/v1/job.py +0 -195
  32. fractal_server/app/routes/api/v1/project.py +0 -475
  33. fractal_server/app/routes/api/v1/task.py +0 -203
  34. fractal_server/app/routes/api/v1/task_collection.py +0 -239
  35. fractal_server/app/routes/api/v1/workflow.py +0 -355
  36. fractal_server/app/routes/api/v1/workflowtask.py +0 -187
  37. fractal_server/app/runner/async_wrap_v1.py +0 -27
  38. fractal_server/app/runner/v1/__init__.py +0 -415
  39. fractal_server/app/runner/v1/_common.py +0 -620
  40. fractal_server/app/runner/v1/_local/__init__.py +0 -186
  41. fractal_server/app/runner/v1/_local/_local_config.py +0 -105
  42. fractal_server/app/runner/v1/_local/_submit_setup.py +0 -48
  43. fractal_server/app/runner/v1/_local/executor.py +0 -100
  44. fractal_server/app/runner/v1/_slurm/__init__.py +0 -312
  45. fractal_server/app/runner/v1/_slurm/_submit_setup.py +0 -81
  46. fractal_server/app/runner/v1/_slurm/get_slurm_config.py +0 -163
  47. fractal_server/app/runner/v1/common.py +0 -117
  48. fractal_server/app/runner/v1/handle_failed_job.py +0 -141
  49. fractal_server/app/schemas/v1/__init__.py +0 -37
  50. fractal_server/app/schemas/v1/applyworkflow.py +0 -161
  51. fractal_server/app/schemas/v1/dataset.py +0 -165
  52. fractal_server/app/schemas/v1/dumps.py +0 -64
  53. fractal_server/app/schemas/v1/manifest.py +0 -126
  54. fractal_server/app/schemas/v1/project.py +0 -66
  55. fractal_server/app/schemas/v1/state.py +0 -18
  56. fractal_server/app/schemas/v1/task.py +0 -167
  57. fractal_server/app/schemas/v1/task_collection.py +0 -110
  58. fractal_server/app/schemas/v1/workflow.py +0 -212
  59. fractal_server/data_migrations/2_11_0.py +0 -168
  60. fractal_server/tasks/v1/_TaskCollectPip.py +0 -103
  61. fractal_server/tasks/v1/__init__.py +0 -0
  62. fractal_server/tasks/v1/background_operations.py +0 -352
  63. fractal_server/tasks/v1/endpoint_operations.py +0 -156
  64. fractal_server/tasks/v1/get_collection_data.py +0 -14
  65. fractal_server/tasks/v1/utils.py +0 -67
  66. {fractal_server-2.11.0a10.dist-info → fractal_server-2.12.0.dist-info}/LICENSE +0 -0
  67. {fractal_server-2.11.0a10.dist-info → fractal_server-2.12.0.dist-info}/WHEEL +0 -0
  68. {fractal_server-2.11.0a10.dist-info → fractal_server-2.12.0.dist-info}/entry_points.txt +0 -0
fractal_server/__init__.py CHANGED
@@ -1 +1 @@
- __VERSION__ = "2.11.0a10"
+ __VERSION__ = "2.12.0"
fractal_server/app/models/__init__.py CHANGED
@@ -4,9 +4,7 @@ thus we should always export all relevant database models from here or they
  will not be picked up by alembic.
  """
  from .linkusergroup import LinkUserGroup # noqa: F401
- from .linkuserproject import LinkUserProject # noqa: F401
  from .linkuserproject import LinkUserProjectV2 # noqa: F401
  from .security import * # noqa
  from .user_settings import UserSettings # noqa
- from .v1 import * # noqa
  from .v2 import * # noqa
fractal_server/app/models/linkuserproject.py CHANGED
@@ -2,15 +2,6 @@ from sqlmodel import Field
  from sqlmodel import SQLModel
 
 
- class LinkUserProject(SQLModel, table=True):
-     """
-     Crossing table between User and Project
-     """
-
-     project_id: int = Field(foreign_key="project.id", primary_key=True)
-     user_id: int = Field(foreign_key="user_oauth.id", primary_key=True)
-
-
  class LinkUserProjectV2(SQLModel, table=True):
      """
      Crossing table between User and ProjectV2
fractal_server/app/models/v2/dataset.py CHANGED
@@ -1,6 +1,5 @@
  from datetime import datetime
  from typing import Any
- from typing import Literal
  from typing import Optional
 
  from sqlalchemy import Column
@@ -42,9 +41,6 @@ class DatasetV2(SQLModel, table=True):
          sa_column=Column(JSON, server_default="[]", nullable=False)
      )
 
-     filters: Optional[
-         dict[Literal["attributes", "types"], dict[str, Any]]
-     ] = Field(sa_column=Column(JSON, nullable=True, server_default="null"))
      type_filters: dict[str, bool] = Field(
          sa_column=Column(JSON, nullable=False, server_default="{}")
      )
fractal_server/app/models/v2/workflowtask.py CHANGED
@@ -1,5 +1,4 @@
  from typing import Any
- from typing import Literal
  from typing import Optional
 
  from sqlalchemy import Column
@@ -25,9 +24,6 @@ class WorkflowTaskV2(SQLModel, table=True):
      args_parallel: Optional[dict[str, Any]] = Field(sa_column=Column(JSON))
      args_non_parallel: Optional[dict[str, Any]] = Field(sa_column=Column(JSON))
 
-     input_filters: Optional[
-         dict[Literal["attributes", "types"], dict[str, Any]]
-     ] = Field(sa_column=Column(JSON, nullable=True, server_default="null"))
      type_filters: dict[str, bool] = Field(
          sa_column=Column(JSON, nullable=False, server_default="{}")
      )
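
Both models make the same change: the nullable `filters`/`input_filters` JSON columns are dropped in favor of the non-nullable `type_filters` column. A minimal self-contained sketch of this SQLModel JSON-column pattern (the `ExampleV2` class is illustrative, not part of the package):

    from typing import Optional

    from sqlalchemy import Column
    from sqlalchemy.types import JSON
    from sqlmodel import Field, SQLModel


    class ExampleV2(SQLModel, table=True):
        id: Optional[int] = Field(default=None, primary_key=True)
        # Non-nullable JSON column with an empty-object server default,
        # matching the `type_filters` fields above.
        type_filters: dict[str, bool] = Field(
            default_factory=dict,
            sa_column=Column(JSON, server_default="{}", nullable=False),
        )
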
fractal_server/app/routes/aux/_job.py CHANGED
@@ -1,12 +1,10 @@
  from pathlib import Path
- from typing import Union
 
- from ...models.v1 import ApplyWorkflow
  from ...models.v2 import JobV2
  from ...runner.filenames import SHUTDOWN_FILENAME
 
 
- def _write_shutdown_file(*, job: Union[ApplyWorkflow, JobV2]):
+ def _write_shutdown_file(*, job: JobV2):
      """
      Write job's shutdown file.
 
fractal_server/app/runner/executors/slurm/ssh/executor.py CHANGED
@@ -385,9 +385,7 @@ class FractalSlurmSSHExecutor(SlurmExecutor):
          args_batches = []
          batch_size = tasks_per_job
          for ind_chunk in range(0, tot_tasks, batch_size):
-             args_batches.append(
-                 list_args[ind_chunk : ind_chunk + batch_size] # noqa
-             )
+             args_batches.append(list_args[ind_chunk : ind_chunk + batch_size])
          if len(args_batches) != math.ceil(tot_tasks / tasks_per_job):
              raise RuntimeError("Something wrong here while batching tasks")
 
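
The rewritten `args_batches` line above is plain list chunking; a standalone sketch of the batching step and its consistency check, with illustrative values:

    import math

    list_args = list(range(10))  # illustrative task arguments
    tot_tasks = len(list_args)
    tasks_per_job = 3

    args_batches = [
        list_args[ind_chunk : ind_chunk + tasks_per_job]
        for ind_chunk in range(0, tot_tasks, tasks_per_job)
    ]
    # 10 tasks in batches of 3 -> 4 batches, i.e. ceil(10 / 3)
    assert len(args_batches) == math.ceil(tot_tasks / tasks_per_job)
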
@@ -536,10 +534,15 @@ class FractalSlurmSSHExecutor(SlurmExecutor):
          _prefixes = []
          _subfolder_names = []
          for component in components:
-             if isinstance(component, dict):
+             # In Fractal, `component` is `dict` by construction (e.g.
+             # `component = {"zarr_url": "/something", "param": 1}`). The
+             # try/except covers the case of e.g. `executor.map([1, 2])`,
+             # which is useful for testing.
+             try:
                  actual_component = component.get(_COMPONENT_KEY_, None)
-             else:
-                 actual_component = component
+             except AttributeError:
+                 actual_component = str(component)
+
              _task_file_paths = get_task_file_paths(
                  workflow_dir_local=task_files.workflow_dir_local,
                  workflow_dir_remote=task_files.workflow_dir_remote,
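
Replacing the `isinstance` branch with try/except is the EAFP idiom ("easier to ask forgiveness than permission"): attempt the dict lookup, fall back on `AttributeError`. A standalone sketch; the actual value of `_COMPONENT_KEY_` does not appear in this diff, so the one below is a placeholder:

    from typing import Optional

    _COMPONENT_KEY_ = "__fractal_component__"  # placeholder value


    def resolve_component(component) -> Optional[str]:
        """Return the dict entry when possible, else a string fallback."""
        try:
            return component.get(_COMPONENT_KEY_, None)
        except AttributeError:
            # e.g. `executor.map([1, 2])` in tests, where items are not dicts
            return str(component)


    assert resolve_component({_COMPONENT_KEY_: "img_7"}) == "img_7"
    assert resolve_component(42) == "42"
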
fractal_server/app/runner/executors/slurm/sudo/executor.py CHANGED
@@ -608,11 +608,7 @@ class FractalSlurmExecutor(SlurmExecutor):
          _prefixes = []
          _subfolder_names = []
          for component in components:
-             if isinstance(component, dict):
-                 # This is needed for V2
-                 actual_component = component.get(_COMPONENT_KEY_, None)
-             else:
-                 actual_component = component
+             actual_component = component.get(_COMPONENT_KEY_, None)
              _task_file_paths = get_task_file_paths(
                  workflow_dir_local=task_files.workflow_dir_local,
                  workflow_dir_remote=task_files.workflow_dir_remote,
fractal_server/app/runner/filenames.py CHANGED
@@ -1,4 +1,2 @@
- HISTORY_FILENAME_V1 = "history.json"
- METADATA_FILENAME_V1 = "metadata.json"
  SHUTDOWN_FILENAME = "shutdown"
  WORKFLOW_LOG_FILENAME = "workflow.log"
fractal_server/app/runner/shutdown.py CHANGED
@@ -3,8 +3,6 @@ import time
  from sqlmodel import select
 
  from fractal_server.app.db import get_async_db
- from fractal_server.app.models.v1 import ApplyWorkflow
- from fractal_server.app.models.v1.job import JobStatusTypeV1
  from fractal_server.app.models.v2 import JobV2
  from fractal_server.app.models.v2.job import JobStatusTypeV2
  from fractal_server.app.routes.aux._job import _write_shutdown_file
@@ -13,9 +11,7 @@ from fractal_server.logger import get_logger
  from fractal_server.syringe import Inject
 
 
- async def cleanup_after_shutdown(
-     *, jobsV1: list[int], jobsV2: list[int], logger_name: str
- ):
+ async def cleanup_after_shutdown(*, jobsV2: list[int], logger_name: str):
      logger = get_logger(logger_name)
      logger.info("Cleanup function after shutdown")
      stm_v2 = (
@@ -24,22 +20,12 @@ async def cleanup_after_shutdown(
          .where(JobV2.status == JobStatusTypeV2.SUBMITTED)
      )
 
-     stm_v1 = (
-         select(ApplyWorkflow)
-         .where(ApplyWorkflow.id.in_(jobsV1))
-         .where(ApplyWorkflow.status == JobStatusTypeV1.SUBMITTED)
-     )
-
      async for session in get_async_db():
          jobsV2_db = (await session.execute(stm_v2)).scalars().all()
-         jobsV1_db = (await session.execute(stm_v1)).scalars().all()
 
          for job in jobsV2_db:
              _write_shutdown_file(job=job)
 
-         for job in jobsV1_db:
-             _write_shutdown_file(job=job)
-
          settings = Inject(get_settings)
 
          t_start = time.perf_counter()
@@ -49,9 +35,8 @@ async def cleanup_after_shutdown(
              logger.info("Waiting 3 seconds before checking")
              time.sleep(3)
              jobsV2_db = (await session.execute(stm_v2)).scalars().all()
-             jobsV1_db = (await session.execute(stm_v1)).scalars().all()
 
-             if len(jobsV2_db) == 0 and len(jobsV1_db) == 0:
+             if len(jobsV2_db) == 0:
                  logger.info(
                      (
                          "All jobs associated to this app are "
@@ -61,10 +46,7 @@ async def cleanup_after_shutdown(
                  return
              else:
                  logger.info(
-                     (
-                         f"Some jobs are still 'submitted' "
-                         f"{jobsV1_db=}, {jobsV2_db=}"
-                     )
+                     (f"Some jobs are still 'submitted' " f"{jobsV2_db=}")
                  )
                  logger.info(
                      (
@@ -79,10 +61,4 @@ async def cleanup_after_shutdown(
              session.add(job)
              await session.commit()
 
-         for job in jobsV1_db:
-             job.status = "failed"
-             job.log = (job.log or "") + "\nJob stopped due to app shutdown\n"
-             session.add(job)
-             await session.commit()
-
          logger.info("Exit from shutdown logic")
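
`cleanup_after_shutdown` now handles V2 jobs only, but its control flow is unchanged: write shutdown files, then poll every 3 seconds until no job is still `SUBMITTED` or the grace period expires, and finally mark any stragglers as failed. A synchronous sketch of just that loop (`fetch_submitted_jobs` is a hypothetical stand-in for the `JobV2` query):

    import time


    def fetch_submitted_jobs() -> list[str]:
        """Hypothetical stand-in for the SUBMITTED-status database query."""
        return []


    def wait_for_jobs(grace_time_s: float = 10.0) -> bool:
        t_start = time.perf_counter()
        while time.perf_counter() - t_start < grace_time_s:
            time.sleep(3)  # same re-check interval as the code above
            if not fetch_submitted_jobs():
                return True  # all jobs left the SUBMITTED state in time
        return False  # grace period expired; caller marks remaining jobs failed
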
fractal_server/app/schemas/_validators.py CHANGED
@@ -48,25 +48,6 @@ def valdict_keys(attribute: str):
      return val
 
 
- def valint(attribute: str, min_val: int = 1):
-     """
-     Check that an integer attribute (e.g. if it is meant to be the ID of a
-     database entry) is greater or equal to min_val.
-     """
-
-     def val(integer: Optional[int]) -> Optional[int]:
-         if integer is None:
-             raise ValueError(f"Integer attribute '{attribute}' cannot be None")
-         if integer < min_val:
-             raise ValueError(
-                 f"Integer attribute '{attribute}' cannot be less than "
-                 f"{min_val} (given {integer})"
-             )
-         return integer
-
-     return val
-
-
  def val_absolute_path(attribute: str, accept_none: bool = False):
      """
      Check that a string attribute is an absolute path
fractal_server/config.py CHANGED
@@ -338,7 +338,7 @@ class Settings(BaseSettings):
 
      FRACTAL_API_MAX_JOB_LIST_LENGTH: int = 50
      """
-     Number of ids that can be stored in the `jobsV1` and `jobsV2` attributes of
+     Number of ids that can be stored in the `jobsV2` attribute of
      `app.state`.
      """
 
@@ -496,13 +496,6 @@ class Settings(BaseSettings):
      `JobExecutionError`.
      """
 
-     FRACTAL_API_SUBMIT_RATE_LIMIT: int = 2
-     """
-     Interval to wait (in seconds) to be allowed to call again
-     `POST api/v1/{project_id}/workflow/{workflow_id}/apply/`
-     with the same path and query parameters.
-     """
-
      FRACTAL_RUNNER_TASKS_INCLUDE_IMAGE: str = (
          "Copy OME-Zarr structure;Convert Metadata Components from 2D to 3D"
      )
@@ -511,13 +504,6 @@ class Settings(BaseSettings):
      attribute in their input-arguments JSON file.
      """
 
-     FRACTAL_API_V1_MODE: Literal[
-         "include", "include_read_only", "exclude"
-     ] = "include"
-     """
-     Whether to include the v1 API.
-     """
-
      FRACTAL_PIP_CACHE_DIR: Optional[str] = None
      """
      Absolute path to the cache directory for `pip`; if unset,
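
All three removed settings were ordinary `BaseSettings` fields, so environment variables such as `FRACTAL_API_V1_MODE` are simply no longer read after this release. A minimal sketch of the env-override mechanics (`MiniSettings` is illustrative, and the import location assumes the pydantic-v1-style `BaseSettings` used by fractal-server 2.x):

    import os

    from pydantic import BaseSettings  # pydantic-v1 location (an assumption here)


    class MiniSettings(BaseSettings):
        FRACTAL_API_MAX_JOB_LIST_LENGTH: int = 50  # same default as above


    os.environ["FRACTAL_API_MAX_JOB_LIST_LENGTH"] = "100"
    assert MiniSettings().FRACTAL_API_MAX_JOB_LIST_LENGTH == 100
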
fractal_server/main.py CHANGED
@@ -39,20 +39,11 @@ def collect_routers(app: FastAPI) -> None:
          The application to register the routers to.
      """
      from .app.routes.api import router_api
-     from .app.routes.api.v1 import router_api_v1
      from .app.routes.api.v2 import router_api_v2
-     from .app.routes.admin.v1 import router_admin_v1
      from .app.routes.admin.v2 import router_admin_v2
      from .app.routes.auth.router import router_auth
 
-     settings = Inject(get_settings)
-
      app.include_router(router_api, prefix="/api")
-     if settings.FRACTAL_API_V1_MODE.startswith("include"):
-         app.include_router(router_api_v1, prefix="/api/v1")
-         app.include_router(
-             router_admin_v1, prefix="/admin/v1", tags=["V1 Admin area"]
-         )
      app.include_router(router_api_v2, prefix="/api/v2")
      app.include_router(
          router_admin_v2, prefix="/admin/v2", tags=["V2 Admin area"]
@@ -84,7 +75,6 @@ def check_settings() -> None:
 
  @asynccontextmanager
  async def lifespan(app: FastAPI):
-     app.state.jobsV1 = []
      app.state.jobsV2 = []
      logger = set_logger("fractal_server.lifespan")
      logger.info("Start application startup")
@@ -123,12 +113,11 @@ async def lifespan(app: FastAPI):
 
      logger.info(
          f"Current worker with pid {os.getpid()} is shutting down. "
-         f"Current jobs: {app.state.jobsV1=}, {app.state.jobsV2=}"
+         f"Current jobs: {app.state.jobsV2=}"
      )
      if _backend_supports_shutdown(settings.FRACTAL_RUNNER_BACKEND):
          try:
              await cleanup_after_shutdown(
-                 jobsV1=app.state.jobsV1,
                  jobsV2=app.state.jobsV2,
                  logger_name="fractal_server.lifespan",
              )
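
The `lifespan` handler is FastAPI's `asynccontextmanager` startup/shutdown hook: code before `yield` runs at startup (initializing the per-worker `app.state.jobsV2` list), code after it runs at shutdown. A minimal runnable sketch of the same structure, with illustrative handler bodies:

    from contextlib import asynccontextmanager

    from fastapi import FastAPI


    @asynccontextmanager
    async def lifespan(app: FastAPI):
        app.state.jobsV2 = []  # startup: one job-id list per worker
        yield
        # shutdown: this is where cleanup_after_shutdown(...) is awaited
        print(f"Shutting down; {app.state.jobsV2=}")


    app = FastAPI(lifespan=lifespan)
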
fractal_server/migrations/versions/1eac13a26c83_drop_v1_tables.py ADDED
@@ -0,0 +1,67 @@
+ """Drop V1 tables
+
+ Revision ID: 1eac13a26c83
+ Revises: af8673379a5c
+ Create Date: 2025-01-10 13:17:47.838607
+
+ """
+ import logging
+
+ from alembic import op
+ from sqlmodel import SQLModel
+
+ from fractal_server.migrations.naming_convention import NAMING_CONVENTION
+
+ # revision identifiers, used by Alembic.
+ revision = "1eac13a26c83"
+ down_revision = "af8673379a5c"
+ branch_labels = None
+ depends_on = None
+
+
+ TABLES_V1 = [
+     "resource",
+     "applyworkflow",
+     "task",
+     "workflow",
+     "workflowtask",
+     "linkuserproject",
+     "dataset",
+     "project",
+     "state",
+ ]
+
+
+ def upgrade() -> None:
+
+     logger = logging.getLogger("alembic.runtime.migration")
+
+     target_metadata = SQLModel.metadata
+     target_metadata.naming_convention = NAMING_CONVENTION
+
+     connection = op.get_bind()
+     target_metadata.reflect(
+         bind=connection,
+         extend_existing=True,
+         only=TABLES_V1,
+     )
+
+     logger.info("Starting non-reversible upgrade")
+     logger.info("Dropping all V1 ForeignKey constraints")
+     fk_names = []
+     for table_name in TABLES_V1:
+         table = target_metadata.tables[table_name]
+         for fk in table.foreign_keys:
+             op.drop_constraint(fk.name, table_name, type_="foreignkey")
+             fk_names.append(fk.name)
+     logger.info(f"Dropped all V1 ForeignKey constraints: {fk_names}")
+     logger.info(f"Dropping all V1 tables: {TABLES_V1}")
+     for table_name in TABLES_V1:
+         op.drop_table(table_name)
+
+
+ def downgrade() -> None:
+     raise RuntimeError(
+         "Cannot downgrade from 1eac13a26c83 to db09233ad13a, "
+         "because it's fully breaking."
+     )
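
Since `downgrade()` raises unconditionally, this revision is a one-way door: once a database is upgraded past it, the V1 tables cannot be restored. Applying it programmatically with Alembic's Python API looks roughly like this (assuming an `alembic.ini` at an illustrative path):

    from alembic import command
    from alembic.config import Config

    cfg = Config("alembic.ini")  # illustrative path
    # Runs af8673379a5c (drop old filter columns) first, then 1eac13a26c83
    # (drop V1 tables); there is no way back afterwards.
    command.upgrade(cfg, "1eac13a26c83")
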
fractal_server/migrations/versions/af8673379a5c_drop_old_filter_columns.py ADDED
@@ -0,0 +1,54 @@
+ """drop old filter columns
+
+ Revision ID: af8673379a5c
+ Revises: db09233ad13a
+ Create Date: 2025-01-30 14:44:04.302795
+
+ """
+ import sqlalchemy as sa
+ from alembic import op
+ from sqlalchemy.dialects import postgresql
+
+ # revision identifiers, used by Alembic.
+ revision = "af8673379a5c"
+ down_revision = "db09233ad13a"
+ branch_labels = None
+ depends_on = None
+
+
+ def upgrade() -> None:
+     # ### commands auto generated by Alembic - please adjust! ###
+     with op.batch_alter_table("datasetv2", schema=None) as batch_op:
+         batch_op.drop_column("filters")
+
+     with op.batch_alter_table("workflowtaskv2", schema=None) as batch_op:
+         batch_op.drop_column("input_filters")
+
+     # ### end Alembic commands ###
+
+
+ def downgrade() -> None:
+     # ### commands auto generated by Alembic - please adjust! ###
+     with op.batch_alter_table("workflowtaskv2", schema=None) as batch_op:
+         batch_op.add_column(
+             sa.Column(
+                 "input_filters",
+                 postgresql.JSON(astext_type=sa.Text()),
+                 server_default=sa.text("'null'::json"),
+                 autoincrement=False,
+                 nullable=True,
+             )
+         )
+
+     with op.batch_alter_table("datasetv2", schema=None) as batch_op:
+         batch_op.add_column(
+             sa.Column(
+                 "filters",
+                 postgresql.JSON(astext_type=sa.Text()),
+                 server_default=sa.text("'null'::json"),
+                 autoincrement=False,
+                 nullable=True,
+             )
+         )
+
+     # ### end Alembic commands ###
fractal_server/string_tools.py CHANGED
@@ -33,27 +33,6 @@ def sanitize_string(value: str) -> str:
      return new_value
 
 
- def slugify_task_name_for_source_v1(task_name: str) -> str:
-     """
-     NOTE: this function is used upon creation of tasks' sources, therefore
-     for the moment we cannot replace it with its more comprehensive version
-     from `fractal_server.string_tools.sanitize_string`, nor we can remove it.
-
-     As of 2.3.1, we are renaming it to `slugify_task_name_for_source`, to make
-     it clear that it should not be used for other purposes.
-
-     As of 2.7.0, we are renaming it to `slugify_task_name_for_source_v1`, to
-     make it clear that it is not used for v2.
-
-     Args:
-         task_name:
-
-     Return:
-         Slug-ified task name.
-     """
-     return task_name.replace(" ", "_").lower()
-
-
  def validate_cmd(
      command: str,
      *,
fractal_server/tasks/utils.py CHANGED
@@ -1,7 +1,5 @@
  from pathlib import Path
 
- from fractal_server.config import get_settings
- from fractal_server.syringe import Inject
 
  COLLECTION_FILENAME = "collection.json"
  COLLECTION_LOG_FILENAME = "collection.log"
@@ -9,31 +7,5 @@ COLLECTION_FREEZE_FILENAME = "collection_freeze.txt"
  FORBIDDEN_DEPENDENCY_STRINGS = ["github.com"]
 
 
- def get_absolute_venv_path_v1(venv_path: Path) -> Path:
-     """
-     If a path is not absolute, make it a relative path of FRACTAL_TASKS_DIR.
-
-     As of v2.7.0, we rename this to v1 since it is only to be used in v1.
-     """
-     if venv_path.is_absolute():
-         package_path = venv_path
-     else:
-         settings = Inject(get_settings)
-         package_path = settings.FRACTAL_TASKS_DIR / venv_path
-     return package_path
-
-
- def get_collection_path(base: Path) -> Path:
-     return base / COLLECTION_FILENAME
-
-
  def get_log_path(base: Path) -> Path:
      return base / COLLECTION_LOG_FILENAME
-
-
- def get_collection_log_v1(path: Path) -> str:
-     package_path = get_absolute_venv_path_v1(path)
-     log_path = get_log_path(package_path)
-     with log_path.open("r") as f:
-         log = f.read()
-     return log
{fractal_server-2.11.0a10.dist-info → fractal_server-2.12.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: fractal-server
- Version: 2.11.0a10
+ Version: 2.12.0
  Summary: Backend component of the Fractal analytics platform
  Home-page: https://github.com/fractal-analytics-platform/fractal-server
  License: BSD-3-Clause