indexify 0.4.29__py3-none-any.whl → 0.4.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. indexify/cli/build_image.py +56 -16
  2. indexify/cli/deploy.py +1 -1
  3. indexify/executor/function_executor_controller/__init__.py +2 -2
  4. indexify/executor/function_executor_controller/completed_task_allocation_metrics.py +87 -0
  5. indexify/executor/function_executor_controller/events.py +29 -33
  6. indexify/executor/function_executor_controller/{finalize_task.py → finalize_task_allocation.py} +45 -37
  7. indexify/executor/function_executor_controller/function_executor_controller.py +194 -180
  8. indexify/executor/function_executor_controller/loggers.py +15 -17
  9. indexify/executor/function_executor_controller/message_validators.py +4 -12
  10. indexify/executor/function_executor_controller/metrics/completed_task_allocation_metrics.py +70 -0
  11. indexify/executor/function_executor_controller/metrics/finalize_task_allocation.py +26 -0
  12. indexify/executor/function_executor_controller/metrics/function_executor_controller.py +12 -11
  13. indexify/executor/function_executor_controller/metrics/prepare_task_allocation.py +27 -0
  14. indexify/executor/function_executor_controller/{prepare_task.py → prepare_task_allocation.py} +33 -29
  15. indexify/executor/function_executor_controller/{run_task.py → run_task_allocation.py} +54 -51
  16. indexify/executor/function_executor_controller/{task_info.py → task_allocation_info.py} +6 -6
  17. indexify/executor/function_executor_controller/{task_input.py → task_allocation_input.py} +2 -2
  18. indexify/executor/function_executor_controller/{task_output.py → task_allocation_output.py} +24 -24
  19. indexify/executor/state_reconciler.py +23 -19
  20. {indexify-0.4.29.dist-info → indexify-0.4.31.dist-info}/METADATA +2 -2
  21. {indexify-0.4.29.dist-info → indexify-0.4.31.dist-info}/RECORD +24 -24
  22. indexify/executor/function_executor_controller/completed_task_metrics.py +0 -83
  23. indexify/executor/function_executor_controller/metrics/completed_task_metrics.py +0 -68
  24. indexify/executor/function_executor_controller/metrics/finalize_task.py +0 -20
  25. indexify/executor/function_executor_controller/metrics/prepare_task.py +0 -18
  26. /indexify/executor/function_executor_controller/metrics/{run_task.py → run_task_allocation.py} +0 -0
  27. {indexify-0.4.29.dist-info → indexify-0.4.31.dist-info}/WHEEL +0 -0
  28. {indexify-0.4.29.dist-info → indexify-0.4.31.dist-info}/entry_points.txt +0 -0
@@ -4,8 +4,7 @@ from typing import Any, Generator, Tuple
4
4
  import click
5
5
  import docker
6
6
  import docker.api.build
7
- import docker.models
8
- import docker.models.images
7
+ from docker.errors import BuildError
9
8
  from tensorlake.functions_sdk.image import Image
10
9
  from tensorlake.functions_sdk.workflow_module import (
11
10
  WorkflowModuleInfo,
@@ -16,7 +15,6 @@ from tensorlake.functions_sdk.workflow_module import (
16
15
  @click.command(
17
16
  short_help="Build images for graphs/workflows defined in the workflow file"
18
17
  )
19
- # Path to the file where the graphs/workflows are defined as global variables
20
18
  @click.argument(
21
19
  "workflow-file-path",
22
20
  type=click.Path(exists=True, file_okay=True, dir_okay=False),
@@ -31,6 +29,11 @@ def build_image(
31
29
  workflow_file_path: str,
32
30
  image_names: tuple[str, ...] = None,
33
31
  ):
32
+ """
33
+ Build the images associated to an Indexify workflow
34
+
35
+ A workflow is defined in a Python file, and the images are built using the local Docker daemon.
36
+ """
34
37
  try:
35
38
  workflow_module_info: WorkflowModuleInfo = load_workflow_module_info(
36
39
  workflow_file_path
@@ -48,7 +51,7 @@ def build_image(
48
51
  indexify_version: str = importlib.metadata.version("indexify")
49
52
  for image in workflow_module_info.images.keys():
50
53
  image: Image
51
- if image_names is not None and image.image_name not in image_names:
54
+ if len(image_names) > 0 and image.image_name not in image_names:
52
55
  click.echo(
53
56
  f"Skipping image `{image.image_name}` as it is not in the provided image names."
54
57
  )
@@ -57,28 +60,65 @@ def build_image(
57
60
  click.echo(f"Building image `{image.image_name}`")
58
61
 
59
62
  image.run(f"pip install 'indexify=={indexify_version}'")
60
- built_image, logs_generator = image.build()
61
- built_image: docker.models.images.Image
62
- for output in logs_generator:
63
- click.secho(output)
63
+ built_image, logs_generator = _build(image=image, docker_client=docker_client)
64
+ try:
65
+ built_image, logs_generator = _build(
66
+ image=image, docker_client=docker_client
67
+ )
68
+ _print_build_log(logs_generator)
69
+ click.secho(f"built image: {built_image.tags[0]}", fg="green")
70
+ except BuildError as e:
71
+ raise click.Abort() from e
64
72
 
65
73
  click.secho(f"built image: {built_image.tags[0]}", fg="green")
66
74
 
67
75
 
68
- def build(
76
+ def _build(
69
77
  image: Image, docker_client: docker.DockerClient
70
78
  ) -> Tuple[docker.models.images.Image, Generator[str, Any, None]]:
71
79
  docker_file = image.dockerfile()
72
- image_name = f"{image.image_name}:{image.image_tag}"
80
+ image_name = (
81
+ image.image_name
82
+ if ":" in image.image_name
83
+ else f"{image.image_name}:{image.image_tag}"
84
+ )
73
85
 
74
86
  docker.api.build.process_dockerfile = lambda dockerfile, path: (
75
87
  "Dockerfile",
76
88
  dockerfile,
77
89
  )
78
90
 
79
- return docker_client.images.build(
80
- path=".",
81
- dockerfile=docker_file,
82
- tag=image_name,
83
- rm=True,
84
- )
91
+ try:
92
+ built_image, logs_generator = docker_client.images.build(
93
+ path=".",
94
+ dockerfile=docker_file,
95
+ tag=image_name,
96
+ rm=True,
97
+ # pull=True, # optional: ensures fresh base images
98
+ # forcerm=True, # optional: always remove intermediate containers
99
+ )
100
+ return built_image, logs_generator
101
+ except BuildError as e:
102
+ click.secho("Docker build failed:", fg="red")
103
+ _print_build_log(e.build_log or [])
104
+ click.secho(str(e), fg="red")
105
+ raise
106
+
107
+
108
+ def _print_build_log(build_logs):
109
+ for log_entry in build_logs:
110
+ if isinstance(log_entry, dict):
111
+ if "stream" in log_entry:
112
+ click.echo(log_entry["stream"].rstrip("\n"))
113
+ elif "status" in log_entry:
114
+ if "id" in log_entry:
115
+ click.echo(f"{log_entry['status']}: {log_entry['id']}")
116
+ else:
117
+ click.echo(log_entry["status"])
118
+ if "errorDetail" in log_entry:
119
+ # This is the most useful bit when a RUN command fails
120
+ msg = log_entry["errorDetail"].get("message") or log_entry.get("error")
121
+ if msg:
122
+ click.secho(msg.rstrip("\n"), fg="red")
123
+ elif isinstance(log_entry, str):
124
+ click.echo(log_entry.rstrip("\n"))
indexify/cli/deploy.py CHANGED
@@ -18,7 +18,7 @@ from tensorlake.functions_sdk.workflow_module import (
18
18
  )
19
19
  @click.option(
20
20
  "-u",
21
- "--upgrade-queued-requests",
21
+ "--upgrade-queued-invocations",
22
22
  is_flag=True,
23
23
  default=False,
24
24
  help="Upgrade invocations that are already queued or running to use the deployed version of the graphs/workflows",
@@ -4,7 +4,7 @@ from .message_validators import (
4
4
  validate_function_executor_description,
5
5
  validate_task_allocation,
6
6
  )
7
- from .task_output import TaskOutput
7
+ from .task_allocation_output import TaskAllocationOutput
8
8
 
9
9
  __all__ = [
10
10
  "function_executor_logger",
@@ -12,5 +12,5 @@ __all__ = [
12
12
  "validate_function_executor_description",
13
13
  "validate_task_allocation",
14
14
  "FunctionExecutorController",
15
- "TaskOutput",
15
+ "TaskAllocationOutput",
16
16
  ]
@@ -0,0 +1,87 @@
1
+ import time
2
+ from typing import Any
3
+
4
+ from indexify.proto.executor_api_pb2 import (
5
+ TaskFailureReason,
6
+ TaskOutcomeCode,
7
+ )
8
+
9
+ from .metrics.completed_task_allocation_metrics import (
10
+ METRIC_TASK_ALLOCATIONS_COMPLETED_FAILURE_REASON_ALL,
11
+ METRIC_TASK_ALLOCATIONS_COMPLETED_FAILURE_REASON_FUNCTION_ERROR,
12
+ METRIC_TASK_ALLOCATIONS_COMPLETED_FAILURE_REASON_FUNCTION_EXECUTOR_TERMINATED,
13
+ METRIC_TASK_ALLOCATIONS_COMPLETED_FAILURE_REASON_INTERNAL_ERROR,
14
+ METRIC_TASK_ALLOCATIONS_COMPLETED_FAILURE_REASON_NONE,
15
+ METRIC_TASK_ALLOCATIONS_COMPLETED_FAILURE_REASON_TASK_CANCELLED,
16
+ METRIC_TASK_ALLOCATIONS_COMPLETED_FAILURE_REASON_UNKNOWN,
17
+ METRIC_TASK_ALLOCATIONS_COMPLETED_OUTCOME_CODE_ALL,
18
+ METRIC_TASK_ALLOCATIONS_COMPLETED_OUTCOME_CODE_FAILURE,
19
+ METRIC_TASK_ALLOCATIONS_COMPLETED_OUTCOME_CODE_SUCCESS,
20
+ metric_task_allocation_completion_latency,
21
+ metric_task_allocations_completed,
22
+ )
23
+ from .task_allocation_info import TaskAllocationInfo
24
+
25
+
26
+ def emit_completed_task_allocation_metrics(
27
+ alloc_info: TaskAllocationInfo, logger: Any
28
+ ) -> None:
29
+ """Emits Prometheus metrics for a completed task allocation.
30
+
31
+ Doesn't raise any exceptions.
32
+ """
33
+ logger = logger.bind(module=__name__)
34
+ metric_task_allocation_completion_latency.observe(
35
+ time.monotonic() - alloc_info.start_time
36
+ )
37
+
38
+ task_outcome_code: TaskOutcomeCode = alloc_info.output.outcome_code
39
+ task_failure_reason: TaskFailureReason = alloc_info.output.failure_reason
40
+ metric_task_allocations_completed.labels(
41
+ outcome_code=METRIC_TASK_ALLOCATIONS_COMPLETED_OUTCOME_CODE_ALL,
42
+ failure_reason=METRIC_TASK_ALLOCATIONS_COMPLETED_FAILURE_REASON_ALL,
43
+ ).inc()
44
+ if task_outcome_code == TaskOutcomeCode.TASK_OUTCOME_CODE_SUCCESS:
45
+ metric_task_allocations_completed.labels(
46
+ outcome_code=METRIC_TASK_ALLOCATIONS_COMPLETED_OUTCOME_CODE_SUCCESS,
47
+ failure_reason=METRIC_TASK_ALLOCATIONS_COMPLETED_FAILURE_REASON_NONE,
48
+ ).inc()
49
+ elif task_outcome_code == TaskOutcomeCode.TASK_OUTCOME_CODE_FAILURE:
50
+ if task_failure_reason == TaskFailureReason.TASK_FAILURE_REASON_INTERNAL_ERROR:
51
+ metric_task_allocations_completed.labels(
52
+ outcome_code=METRIC_TASK_ALLOCATIONS_COMPLETED_OUTCOME_CODE_FAILURE,
53
+ failure_reason=METRIC_TASK_ALLOCATIONS_COMPLETED_FAILURE_REASON_INTERNAL_ERROR,
54
+ ).inc()
55
+ elif (
56
+ task_failure_reason
57
+ == TaskFailureReason.TASK_FAILURE_REASON_FUNCTION_EXECUTOR_TERMINATED
58
+ ):
59
+ metric_task_allocations_completed.labels(
60
+ outcome_code=METRIC_TASK_ALLOCATIONS_COMPLETED_OUTCOME_CODE_FAILURE,
61
+ failure_reason=METRIC_TASK_ALLOCATIONS_COMPLETED_FAILURE_REASON_FUNCTION_EXECUTOR_TERMINATED,
62
+ ).inc()
63
+ elif (
64
+ task_failure_reason == TaskFailureReason.TASK_FAILURE_REASON_TASK_CANCELLED
65
+ ):
66
+ metric_task_allocations_completed.labels(
67
+ outcome_code=METRIC_TASK_ALLOCATIONS_COMPLETED_OUTCOME_CODE_FAILURE,
68
+ failure_reason=METRIC_TASK_ALLOCATIONS_COMPLETED_FAILURE_REASON_TASK_CANCELLED,
69
+ ).inc()
70
+ elif task_failure_reason in [
71
+ TaskFailureReason.TASK_FAILURE_REASON_FUNCTION_ERROR,
72
+ TaskFailureReason.TASK_FAILURE_REASON_FUNCTION_TIMEOUT,
73
+ TaskFailureReason.TASK_FAILURE_REASON_INVOCATION_ERROR,
74
+ ]:
75
+ metric_task_allocations_completed.labels(
76
+ outcome_code=METRIC_TASK_ALLOCATIONS_COMPLETED_OUTCOME_CODE_FAILURE,
77
+ failure_reason=METRIC_TASK_ALLOCATIONS_COMPLETED_FAILURE_REASON_FUNCTION_ERROR,
78
+ ).inc()
79
+ else:
80
+ metric_task_allocations_completed.labels(
81
+ outcome_code=METRIC_TASK_ALLOCATIONS_COMPLETED_OUTCOME_CODE_FAILURE,
82
+ failure_reason=METRIC_TASK_ALLOCATIONS_COMPLETED_FAILURE_REASON_UNKNOWN,
83
+ ).inc()
84
+ logger.warning(
85
+ "unexpected task allocation failure reason",
86
+ failure_reason=TaskFailureReason.Name(task_failure_reason),
87
+ )
@@ -1,26 +1,22 @@
1
1
  from enum import Enum
2
2
  from typing import List, Optional
3
3
 
4
- from tensorlake.function_executor.proto.function_executor_pb2 import (
5
- FunctionInputs,
6
- )
7
-
8
4
  from indexify.executor.function_executor.function_executor import (
9
5
  FunctionExecutor,
10
6
  )
11
7
  from indexify.proto.executor_api_pb2 import FunctionExecutorTerminationReason
12
8
 
13
- from .task_info import TaskInfo
9
+ from .task_allocation_info import TaskAllocationInfo
14
10
 
15
11
 
16
12
  class EventType(Enum):
17
13
  FUNCTION_EXECUTOR_CREATED = 1
18
14
  FUNCTION_EXECUTOR_TERMINATED = 2
19
15
  SHUTDOWN_INITIATED = 3
20
- TASK_PREPARATION_FINISHED = 4
21
- SCHEDULE_TASK_EXECUTION = 5
22
- TASK_EXECUTION_FINISHED = 6
23
- TASK_OUTPUT_UPLOAD_FINISHED = 7
16
+ TASK_ALLOCATION_PREPARATION_FINISHED = 4
17
+ SCHEDULE_TASK_ALLOCATION_EXECUTION = 5
18
+ TASK_ALLOCATION_EXECUTION_FINISHED = 6
19
+ TASK_ALLOCATION_FINALIZATION_FINISHED = 7
24
20
 
25
21
 
26
22
  class BaseEvent:
@@ -94,50 +90,50 @@ class ShutdownInitiated(BaseEvent):
94
90
  super().__init__(EventType.SHUTDOWN_INITIATED)
95
91
 
96
92
 
97
- class TaskPreparationFinished(BaseEvent):
93
+ class TaskAllocationPreparationFinished(BaseEvent):
98
94
  """
99
- Event indicating that a task has been prepared for execution or failed to do that.
95
+ Event indicating that a task allocation has been prepared for execution or failed to do that.
100
96
  """
101
97
 
102
98
  def __init__(
103
99
  self,
104
- task_info: TaskInfo,
100
+ alloc_info: TaskAllocationInfo,
105
101
  is_success: bool,
106
102
  ):
107
- super().__init__(EventType.TASK_PREPARATION_FINISHED)
108
- self.task_info: TaskInfo = task_info
103
+ super().__init__(EventType.TASK_ALLOCATION_PREPARATION_FINISHED)
104
+ self.alloc_info: TaskAllocationInfo = alloc_info
109
105
  self.is_success: bool = is_success
110
106
 
111
107
  def __str__(self) -> str:
112
108
  return (
113
109
  f"Event(type={self.event_type.name}, "
114
- f"task_id={self.task_info.allocation.task.id}, "
115
- f"allocation_id={self.task_info.allocation.allocation_id}), "
110
+ f"task_id={self.alloc_info.allocation.task.id}, "
111
+ f"allocation_id={self.alloc_info.allocation.allocation_id}), "
116
112
  f"is_success={self.is_success}"
117
113
  )
118
114
 
119
115
 
120
- class ScheduleTaskExecution(BaseEvent):
116
+ class ScheduleTaskAllocationExecution(BaseEvent):
121
117
  """
122
- Event indicating that a task execution has been scheduled.
118
+ Event indicating that a task allocation has been scheduled.
123
119
  """
124
120
 
125
121
  def __init__(self):
126
- super().__init__(EventType.SCHEDULE_TASK_EXECUTION)
122
+ super().__init__(EventType.SCHEDULE_TASK_ALLOCATION_EXECUTION)
127
123
 
128
124
 
129
- class TaskExecutionFinished(BaseEvent):
125
+ class TaskAllocationExecutionFinished(BaseEvent):
130
126
  """
131
- Event indicating that a task execution has been finished on Function Executor.
127
+ Event indicating that a task allocation execution has been finished on Function Executor.
132
128
  """
133
129
 
134
130
  def __init__(
135
131
  self,
136
- task_info: TaskInfo,
132
+ alloc_info: TaskAllocationInfo,
137
133
  function_executor_termination_reason: FunctionExecutorTerminationReason, # type: Optional[FunctionExecutorTerminationReason]
138
134
  ):
139
- super().__init__(EventType.TASK_EXECUTION_FINISHED)
140
- self.task_info: TaskInfo = task_info
135
+ super().__init__(EventType.TASK_ALLOCATION_EXECUTION_FINISHED)
136
+ self.alloc_info: TaskAllocationInfo = alloc_info
141
137
  # Not None if the FE needs to get destroyed after running the task.
142
138
  self.function_executor_termination_reason = function_executor_termination_reason
143
139
 
@@ -151,26 +147,26 @@ class TaskExecutionFinished(BaseEvent):
151
147
  )
152
148
  return (
153
149
  f"Event(type={self.event_type.name}, "
154
- f"task_id={self.task_info.allocation.task.id}, "
155
- f"allocation_id={self.task_info.allocation.allocation_id}), "
150
+ f"task_id={self.alloc_info.allocation.task.id}, "
151
+ f"allocation_id={self.alloc_info.allocation.allocation_id}), "
156
152
  f"function_executor_termination_reason={function_executor_termination_reason_str}"
157
153
  )
158
154
 
159
155
 
160
- class TaskFinalizationFinished(BaseEvent):
156
+ class TaskAllocationFinalizationFinished(BaseEvent):
161
157
  """
162
- Event indicating that a task finalization is finished.
158
+ Event indicating that a task allocation finalization is finished.
163
159
  """
164
160
 
165
- def __init__(self, task_info: TaskInfo, is_success: bool):
166
- super().__init__(EventType.TASK_OUTPUT_UPLOAD_FINISHED)
167
- self.task_info: TaskInfo = task_info
161
+ def __init__(self, alloc_info: TaskAllocationInfo, is_success: bool):
162
+ super().__init__(EventType.TASK_ALLOCATION_FINALIZATION_FINISHED)
163
+ self.alloc_info: TaskAllocationInfo = alloc_info
168
164
  self.is_success: bool = is_success
169
165
 
170
166
  def __str__(self) -> str:
171
167
  return (
172
168
  f"Event(type={self.event_type.name}, "
173
- f"task_id={self.task_info.allocation.task.id}, "
174
- f"allocation_id={self.task_info.allocation.allocation_id}), "
169
+ f"task_id={self.alloc_info.allocation.task.id}, "
170
+ f"allocation_id={self.alloc_info.allocation.allocation_id}), "
175
171
  f"is_success={self.is_success}"
176
172
  )
@@ -8,21 +8,21 @@ from indexify.proto.executor_api_pb2 import (
8
8
  TaskOutcomeCode,
9
9
  )
10
10
 
11
- from .events import TaskFinalizationFinished
12
- from .metrics.finalize_task import (
13
- metric_task_finalization_errors,
14
- metric_task_finalization_latency,
15
- metric_task_finalizations,
16
- metric_tasks_finalizing,
11
+ from .events import TaskAllocationFinalizationFinished
12
+ from .metrics.finalize_task_allocation import (
13
+ metric_task_allocation_finalization_errors,
14
+ metric_task_allocation_finalization_latency,
15
+ metric_task_allocation_finalizations,
16
+ metric_task_allocations_finalizing,
17
17
  )
18
- from .task_info import TaskInfo
19
- from .task_input import TaskInput
20
- from .task_output import TaskOutput
18
+ from .task_allocation_info import TaskAllocationInfo
19
+ from .task_allocation_input import TaskAllocationInput
20
+ from .task_allocation_output import TaskAllocationOutput
21
21
 
22
22
 
23
- async def finalize_task(
24
- task_info: TaskInfo, blob_store: BLOBStore, logger: Any
25
- ) -> TaskFinalizationFinished:
23
+ async def finalize_task_allocation(
24
+ task_alloc: TaskAllocationInfo, blob_store: BLOBStore, logger: Any
25
+ ) -> TaskAllocationFinalizationFinished:
26
26
  """Prepares the task output for getting it reported to Server.
27
27
 
28
28
  The task output is either coming from a failed task or from its finished execution on the Function Executor.
@@ -32,34 +32,40 @@ async def finalize_task(
32
32
  start_time = time.monotonic()
33
33
 
34
34
  with (
35
- metric_tasks_finalizing.track_inprogress(),
36
- metric_task_finalization_latency.time(),
37
- metric_task_finalization_errors.count_exceptions(),
35
+ metric_task_allocations_finalizing.track_inprogress(),
36
+ metric_task_allocation_finalization_latency.time(),
37
+ metric_task_allocation_finalization_errors.count_exceptions(),
38
38
  ):
39
- metric_task_finalizations.inc()
39
+ metric_task_allocation_finalizations.inc()
40
40
  try:
41
- await _finalize_task_output(
42
- task_info=task_info,
41
+ await _finalize_task_alloc_output(
42
+ alloc_info=task_alloc,
43
43
  blob_store=blob_store,
44
44
  logger=logger,
45
45
  )
46
46
  logger.info(
47
- "task finalized",
47
+ "task allocation finalized",
48
48
  duration=time.monotonic() - start_time,
49
49
  )
50
- return TaskFinalizationFinished(task_info=task_info, is_success=True)
50
+ return TaskAllocationFinalizationFinished(
51
+ alloc_info=task_alloc, is_success=True
52
+ )
51
53
  except asyncio.CancelledError:
52
- return TaskFinalizationFinished(task_info=task_info, is_success=False)
54
+ return TaskAllocationFinalizationFinished(
55
+ alloc_info=task_alloc, is_success=False
56
+ )
53
57
  except BaseException as e:
54
58
  logger.error(
55
- "failed to finalize task",
59
+ "failed to finalize task allocation",
56
60
  exc_info=e,
57
61
  duration=time.monotonic() - start_time,
58
62
  )
59
- return TaskFinalizationFinished(task_info=task_info, is_success=False)
63
+ return TaskAllocationFinalizationFinished(
64
+ alloc_info=task_alloc, is_success=False
65
+ )
60
66
 
61
67
 
62
- class _TaskOutputSummary:
68
+ class _TaskAllocationOutputSummary:
63
69
  def __init__(self):
64
70
  self.output_count: int = 0
65
71
  self.output_bytes: int = 0
@@ -68,27 +74,27 @@ class _TaskOutputSummary:
68
74
  self.next_functions_count: int = 0
69
75
 
70
76
 
71
- async def _finalize_task_output(
72
- task_info: TaskInfo, blob_store: BLOBStore, logger: Any
77
+ async def _finalize_task_alloc_output(
78
+ alloc_info: TaskAllocationInfo, blob_store: BLOBStore, logger: Any
73
79
  ) -> None:
74
80
  """Finalizes the task output.
75
81
 
76
82
  Raises exception on error."""
77
- if task_info.input is None:
83
+ if alloc_info.input is None:
78
84
  raise Exception(
79
- "task input is None, this should never happen",
85
+ "task allocation input is None, this should never happen",
80
86
  )
81
- if task_info.output is None:
87
+ if alloc_info.output is None:
82
88
  raise Exception(
83
- "task output is None, this should never happen",
89
+ "task allocation output is None, this should never happen",
84
90
  )
85
91
 
86
- input: TaskInput = task_info.input
87
- output: TaskOutput = task_info.output
92
+ input: TaskAllocationInput = alloc_info.input
93
+ output: TaskAllocationOutput = alloc_info.output
88
94
 
89
- output_summary: _TaskOutputSummary = _task_output_summary(output)
95
+ output_summary: _TaskAllocationOutputSummary = _task_output_summary(output)
90
96
  logger.info(
91
- "task output summary",
97
+ "task allocation output summary",
92
98
  output_count=output_summary.output_count,
93
99
  output_bytes=output_summary.output_bytes,
94
100
  invocation_error_output_count=output_summary.invocation_error_output_count,
@@ -152,8 +158,10 @@ async def _finalize_task_output(
152
158
  )
153
159
 
154
160
 
155
- def _task_output_summary(task_output: TaskOutput) -> _TaskOutputSummary:
156
- summary: _TaskOutputSummary = _TaskOutputSummary()
161
+ def _task_output_summary(
162
+ task_output: TaskAllocationOutput,
163
+ ) -> _TaskAllocationOutputSummary:
164
+ summary: _TaskAllocationOutputSummary = _TaskAllocationOutputSummary()
157
165
 
158
166
  for output in task_output.function_outputs:
159
167
  summary.output_count += 1
@@ -172,7 +180,7 @@ def _task_output_summary(task_output: TaskOutput) -> _TaskOutputSummary:
172
180
 
173
181
  # Temporary workaround is logging customer metrics until we store them somewhere
174
182
  # for future retrieval and processing.
175
- def _log_function_metrics(output: TaskOutput, logger: Any):
183
+ def _log_function_metrics(output: TaskAllocationOutput, logger: Any):
176
184
  if output.metrics is None:
177
185
  return
178
186