clarifai 11.5.1__py3-none-any.whl → 11.5.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. clarifai/__init__.py +1 -1
  2. clarifai/cli/model.py +42 -1
  3. clarifai/cli/pipeline.py +137 -0
  4. clarifai/cli/pipeline_step.py +104 -0
  5. clarifai/cli/templates/__init__.py +1 -0
  6. clarifai/cli/templates/pipeline_step_templates.py +64 -0
  7. clarifai/cli/templates/pipeline_templates.py +150 -0
  8. clarifai/client/auth/helper.py +46 -21
  9. clarifai/client/auth/register.py +5 -0
  10. clarifai/client/auth/stub.py +116 -12
  11. clarifai/client/base.py +9 -0
  12. clarifai/client/model.py +111 -7
  13. clarifai/client/model_client.py +355 -6
  14. clarifai/client/user.py +81 -0
  15. clarifai/runners/models/model_builder.py +52 -9
  16. clarifai/runners/pipeline_steps/__init__.py +0 -0
  17. clarifai/runners/pipeline_steps/pipeline_step_builder.py +510 -0
  18. clarifai/runners/pipelines/__init__.py +0 -0
  19. clarifai/runners/pipelines/pipeline_builder.py +313 -0
  20. clarifai/runners/utils/code_script.py +40 -7
  21. clarifai/runners/utils/const.py +2 -2
  22. clarifai/runners/utils/model_utils.py +135 -0
  23. clarifai/runners/utils/pipeline_validation.py +153 -0
  24. {clarifai-11.5.1.dist-info → clarifai-11.5.3.dist-info}/METADATA +1 -1
  25. {clarifai-11.5.1.dist-info → clarifai-11.5.3.dist-info}/RECORD +30 -19
  26. /clarifai/cli/{model_templates.py → templates/model_templates.py} +0 -0
  27. {clarifai-11.5.1.dist-info → clarifai-11.5.3.dist-info}/WHEEL +0 -0
  28. {clarifai-11.5.1.dist-info → clarifai-11.5.3.dist-info}/entry_points.txt +0 -0
  29. {clarifai-11.5.1.dist-info → clarifai-11.5.3.dist-info}/licenses/LICENSE +0 -0
  30. {clarifai-11.5.1.dist-info → clarifai-11.5.3.dist-info}/top_level.txt +0 -0
clarifai/runners/pipelines/pipeline_builder.py (new file)
@@ -0,0 +1,313 @@
+ import json
+ import os
+ import sys
+ from typing import Any, Dict
+
+ import yaml
+ from clarifai_grpc.grpc.api import resources_pb2, service_pb2
+ from clarifai_grpc.grpc.api.status import status_code_pb2
+
+ from clarifai.client.base import BaseClient
+ from clarifai.runners.utils.pipeline_validation import PipelineConfigValidator
+ from clarifai.utils.logging import logger
+
+
+ class LiteralBlockDumper(yaml.SafeDumper):
+     """Custom YAML dumper that uses literal block style for multi-line strings."""
+
+     def represent_str(self, data):
+         if '\n' in data:
+             return self.represent_scalar('tag:yaml.org,2002:str', data, style='|')
+         return self.represent_scalar('tag:yaml.org,2002:str', data)
+
+
+ LiteralBlockDumper.add_representer(str, LiteralBlockDumper.represent_str)
+
+
+ class PipelineBuilder:
+     """Pipeline Builder class for managing pipeline upload to Clarifai."""
+
+     def __init__(self, config_path: str):
+         """
+         Initialize PipelineBuilder.
+
+         :param config_path: Path to the pipeline configuration file
+         """
+         self._client = None
+         self.config_path = os.path.abspath(config_path)
+         self.config_dir = os.path.dirname(self.config_path)
+         self.config = self._load_config()
+         self.validator = PipelineConfigValidator()
+         self.validator.validate_config(self.config)
+
+         # Extract pipeline info
+         pipeline_config = self.config["pipeline"]
+         self.pipeline_id = pipeline_config["id"]
+         self.user_id = pipeline_config["user_id"]
+         self.app_id = pipeline_config["app_id"]
+
+         # Track uploaded pipeline step versions
+         self.uploaded_step_versions = {}
+
+     @property
+     def client(self):
+         """Get or create the Clarifai client."""
+         if self._client is None:
+             self._client = BaseClient(user_id=self.user_id, app_id=self.app_id)
+         return self._client
+
+     def _load_config(self) -> Dict[str, Any]:
+         """Load and return the configuration from config file."""
+         try:
+             with open(self.config_path, 'r') as file:
+                 config = yaml.safe_load(file)
+             return config
+         except Exception as e:
+             raise ValueError(f"Error loading config file {self.config_path}: {e}")
+
+     def _save_config(self) -> None:
+         """Save the updated configuration back to the file."""
+         try:
+             with open(self.config_path, 'w', encoding="utf-8") as file:
+                 yaml.dump(
+                     self.config,
+                     file,
+                     Dumper=LiteralBlockDumper,
+                     default_flow_style=False,
+                     sort_keys=False,
+                 )
+         except Exception as e:
+             raise ValueError(f"Error saving config file {self.config_path}: {e}")
+
+     def upload_pipeline_steps(self) -> bool:
+         """Upload all pipeline steps listed in step_directories."""
+         pipeline_config = self.config["pipeline"]
+         step_directories = pipeline_config.get("step_directories", [])
+
+         if not step_directories:
+             logger.info("No pipeline steps to upload (step_directories is empty)")
+             return True
+
+         logger.info(f"Uploading {len(step_directories)} pipeline steps...")
+
+         for step_dir in step_directories:
+             step_path = os.path.join(self.config_dir, step_dir)
+
+             if not os.path.exists(step_path):
+                 logger.error(f"Pipeline step directory not found: {step_path}")
+                 return False
+
+             logger.info(f"Uploading pipeline step from directory: {step_dir}")
+
+             # Create a backup of the original upload function to capture the version
+             # We'll need to modify the upload process to capture the version ID
+             success, version_id = self._upload_pipeline_step_with_version_capture(step_path)
+
+             if not success:
+                 logger.error(f"Failed to upload pipeline step from directory: {step_dir}")
+                 return False
+
+             # Store the version ID for later use
+             self.uploaded_step_versions[step_dir] = version_id
+             logger.info(
+                 f"Successfully uploaded pipeline step {step_dir} with version {version_id}"
+             )
+
+         return True
+
+     def _upload_pipeline_step_with_version_capture(self, step_path: str) -> tuple[bool, str]:
+         """Upload a pipeline step and capture its version ID."""
+         try:
+             # Use the existing pipeline step builder
+             from clarifai.runners.pipeline_steps.pipeline_step_builder import PipelineStepBuilder
+
+             builder = PipelineStepBuilder(step_path)
+
+             # Create dockerfile if needed
+             builder.create_dockerfile()
+
+             # Check if step exists
+             exists = builder.check_pipeline_step_exists()
+             if exists:
+                 logger.info(
+                     f"Pipeline step {builder.pipeline_step_id} already exists, creating new version"
+                 )
+             else:
+                 logger.info(f"Creating new pipeline step {builder.pipeline_step_id}")
+
+             # Upload the pipeline step version directly without the interactive prompt
+             success = builder.upload_pipeline_step_version()
+
+             if success and builder.pipeline_step_version_id:
+                 return True, builder.pipeline_step_version_id
+             else:
+                 logger.error("Failed to get pipeline step version ID after upload")
+                 return False, ""
+
+         except Exception as e:
+             logger.error(f"Error uploading pipeline step: {e}")
+             return False, ""
+
+     def update_config_with_versions(self) -> None:
+         """Update the config.yaml with uploaded pipeline step versions."""
+         if not self.uploaded_step_versions:
+             logger.info("No pipeline step versions to update in config")
+             return
+
+         logger.info("Updating config.yaml with pipeline step versions...")
+
+         # Update the orchestration spec
+         pipeline_config = self.config["pipeline"]
+         orchestration_spec = pipeline_config["orchestration_spec"]
+         argo_spec_str = orchestration_spec["argo_orchestration_spec"]
+         argo_spec = yaml.safe_load(argo_spec_str)
+
+         # Update templateRef names to include versions
+         self._update_template_refs_with_versions(argo_spec)
+
+         # Update the config
+         orchestration_spec["argo_orchestration_spec"] = yaml.dump(
+             argo_spec, Dumper=LiteralBlockDumper, default_flow_style=False
+         )
+
+         # Remove uploaded directories from step_directories
+         remaining_dirs = []
+         for step_dir in pipeline_config.get("step_directories", []):
+             if step_dir not in self.uploaded_step_versions:
+                 remaining_dirs.append(step_dir)
+
+         pipeline_config["step_directories"] = remaining_dirs
+
+         # Save the updated config
+         self._save_config()
+         logger.info("Updated config.yaml with pipeline step versions")
+
+     def _update_template_refs_with_versions(self, argo_spec: Dict[str, Any]) -> None:
+         """Update templateRef names in Argo spec to include version information."""
+         for template in argo_spec["spec"]["templates"]:
+             if "steps" in template:
+                 for step_group in template["steps"]:
+                     for step in step_group:
+                         if "templateRef" in step:
+                             template_ref = step["templateRef"]
+                             name = template_ref["name"]
+
+                             # Check if this is a templateRef without version that we uploaded
+                             if self.validator.TEMPLATE_REF_WITHOUT_VERSION_PATTERN.match(name):
+                                 # Extract step name
+                                 parts = name.split('/')
+                                 step_name = parts[-1]
+
+                                 # Find the corresponding directory and version
+                                 for step_dir, version_id in self.uploaded_step_versions.items():
+                                     # The step name should match the directory name or be derivable from it
+                                     if step_name == step_dir:
+                                         # Update the templateRef to include version
+                                         new_name = f"{name}/versions/{version_id}"
+                                         template_ref["name"] = new_name
+                                         template_ref["template"] = new_name
+                                         logger.info(
+                                             f"Updated templateRef from {name} to {new_name}"
+                                         )
+                                         break
+
+     def create_pipeline(self) -> bool:
+         """Create the pipeline using PostPipelines RPC."""
+         logger.info(f"Creating pipeline {self.pipeline_id}...")
+
+         try:
+             # Create pipeline proto
+             pipeline = resources_pb2.Pipeline(
+                 id=self.pipeline_id, user_id=self.user_id, app_id=self.app_id
+             )
+
+             # Add orchestration spec
+             pipeline_config = self.config["pipeline"]
+             orchestration_spec = pipeline_config["orchestration_spec"]
+             argo_spec_str = orchestration_spec["argo_orchestration_spec"]
+
+             # Parse the Argo spec to get API version
+             argo_spec = yaml.safe_load(argo_spec_str)
+             api_version = argo_spec.get("apiVersion", "argoproj.io/v1alpha1")
+
+             # Create pipeline version with orchestration spec
+             pipeline_version = resources_pb2.PipelineVersion()
+             # Create orchestration spec proto
+             orchestration_spec_proto = resources_pb2.OrchestrationSpec()
+             # Create Argo orchestration spec proto
+             argo_orchestration_spec_proto = resources_pb2.ArgoOrchestrationSpec()
+             argo_orchestration_spec_proto.api_version = api_version
+             argo_orchestration_spec_proto.spec_json = json.dumps(argo_spec)
+
+             orchestration_spec_proto.argo_orchestration_spec.CopyFrom(
+                 argo_orchestration_spec_proto
+             )
+             pipeline_version.orchestration_spec.CopyFrom(orchestration_spec_proto)
+
+             pipeline.pipeline_version.CopyFrom(pipeline_version)
+
+             # Make the RPC call
+             response = self.client.STUB.PostPipelines(
+                 service_pb2.PostPipelinesRequest(
+                     user_app_id=self.client.user_app_id, pipelines=[pipeline]
+                 )
+             )
+
+             if response.status.code == status_code_pb2.SUCCESS:
+                 logger.info(f"Successfully created pipeline {self.pipeline_id}")
+
+                 # Log pipeline and version IDs if available in response
+                 if response.pipelines:
+                     created_pipeline = response.pipelines[0]
+                     logger.info(f"Pipeline ID: {created_pipeline.id}")
+                     if created_pipeline.pipeline_version and created_pipeline.pipeline_version.id:
+                         logger.info(f"Pipeline version ID: {created_pipeline.pipeline_version.id}")
+
+                 return True
+             else:
+                 logger.error(f"Failed to create pipeline: {response.status.description}")
+                 logger.error(f"Details: {response.status.details}")
+                 return False
+
+         except Exception as e:
+             logger.error(f"Error creating pipeline: {e}")
+             return False
+
+
+ def upload_pipeline(path: str):
+     """
+     Upload a pipeline with associated pipeline steps to Clarifai.
+
+     :param path: Path to the pipeline configuration file or directory containing config.yaml
+     """
+     try:
+         # Determine if path is a directory or file
+         if os.path.isdir(path):
+             config_path = os.path.join(path, "config.yaml")
+             if not os.path.exists(config_path):
+                 raise FileNotFoundError(f"config.yaml not found in directory: {path}")
+         else:
+             config_path = path
+
+         builder = PipelineBuilder(config_path)
+
+         logger.info(f"Starting pipeline upload from config: {config_path}")
+
+         # Step 1: Upload pipeline steps
+         if not builder.upload_pipeline_steps():
+             logger.error("Failed to upload pipeline steps")
+             sys.exit(1)
+
+         # Step 2: Update config with version information
+         builder.update_config_with_versions()
+
+         # Step 3: Create the pipeline
+         if not builder.create_pipeline():
+             logger.error("Failed to create pipeline")
+             sys.exit(1)
+
+         logger.info("Pipeline upload completed successfully!")
+
+     except Exception as e:
+         logger.error(f"Pipeline upload failed: {e}")
+         sys.exit(1)
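The new upload_pipeline flow above is driven entirely by a config.yaml. Below is a minimal usage sketch; the "./my-pipeline" directory and its contents are hypothetical, but the config it points at must contain the pipeline id/user_id/app_id, step_directories, and orchestration_spec.argo_orchestration_spec fields that PipelineBuilder reads.

# Hypothetical usage; "./my-pipeline" and its contents are illustrative, not from this diff.
from clarifai.runners.pipelines.pipeline_builder import PipelineBuilder, upload_pipeline

# One-shot helper: uploads each step listed in step_directories, rewrites config.yaml with
# the returned pipeline step version IDs, then creates the pipeline via PostPipelines.
upload_pipeline("./my-pipeline")  # also accepts a direct path to a config.yaml

# The same flow, run explicitly:
builder = PipelineBuilder("./my-pipeline/config.yaml")
if builder.upload_pipeline_steps():        # upload steps, capture version IDs
    builder.update_config_with_versions()  # append /versions/<id> to matching templateRefs
    builder.create_pipeline()              # PostPipelines RPC with the Argo orchestration spec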
clarifai/runners/utils/code_script.py
@@ -28,6 +28,8 @@ def generate_client_script(
      model_id,
      base_url: str = None,
      deployment_id: str = None,
+     compute_cluster_id: str = None,
+     nodepool_id: str = None,
      use_ctx: bool = False,
  ) -> str:
      url_helper = ClarifaiUrlHelper()
@@ -96,15 +98,42 @@ from clarifai.client import Model
  from clarifai.runners.utils import data_types
  {model_section}
  """
+     if deployment_id and (compute_cluster_id or nodepool_id):
+         raise ValueError(
+             "You can only specify one of deployment_id or compute_cluster_id and nodepool_id."
+         )
+     if compute_cluster_id and nodepool_id:
+         deployment_id = None
+     else:
+         deployment_id = (
+             "os.environ['CLARIFAI_DEPLOYMENT_ID']" if not deployment_id else repr(deployment_id)
+         )
 
-     deployment_id = (
-         "os.environ['CLARIFAI_DEPLOYMENT_ID']" if deployment_id is None else deployment_id
+     deployment_line = (
+         f'deployment_id = {deployment_id}, # Only needed for dedicated deployed models'
+         if deployment_id
+         else ""
+     )
+     compute_cluster_line = (
+         f'compute_cluster_id = "{compute_cluster_id}",' if compute_cluster_id else ""
+     )
+     nodepool_line = (
+         f'nodepool_id = "{nodepool_id}", # Only needed for dedicated nodepool'
+         if nodepool_id
+         else ""
      )
 
      base_url_str = ""
      if base_url is not None:
          base_url_str = f"base_url={base_url},"
 
+     # Join all non-empty lines
+     optional_lines = "\n ".join(
+         line
+         for line in [deployment_line, compute_cluster_line, nodepool_line, base_url_str]
+         if line
+     )
+
      if use_ctx:
          model_section = """
  model = Model.from_current_context()"""
@@ -112,8 +141,7 @@ model = Model.from_current_context()"""
          model_ui_url = url_helper.clarifai_url(user_id, app_id, "models", model_id)
          model_section = f"""
  model = Model("{model_ui_url}",
- deployment_id = {deployment_id}, # Only needed for dedicated deployed models
- {base_url_str}
+ {optional_lines}
  )
  """
 
@@ -128,14 +156,19 @@ model = Model("{model_ui_url}",
      method_name = method_signature.name
      client_script_str = f'response = model.{method_name}('
      annotations = _get_annotations_source(method_signature)
-     for param_name, (param_type, default_value, required) in annotations.items():
+     for idx, (param_name, (param_type, default_value, required)) in enumerate(
+         annotations.items()
+     ):
          if param_name == "return":
              continue
          if default_value is None and required:
              default_value = _set_default_value(param_type)
+         if not default_value and idx == 0:
+             default_value = _set_default_value(param_type)
          if param_type == "str" and default_value is not None:
              default_value = json.dumps(default_value)
-         client_script_str += f"{param_name}={default_value}, "
+         if default_value is not None:
+             client_script_str += f"{param_name}={default_value}, "
      client_script_str = client_script_str.rstrip(", ") + ")"
      if method_signature.method_type == resources_pb2.RunnerMethodType.UNARY_UNARY:
          client_script_str += "\nprint(response)"
@@ -229,7 +262,7 @@ def _map_default_value(field_type):
      default_value = None
 
      if field_type == "str":
-         default_value = repr('What is the future of AI?')
+         default_value = 'What is the future of AI?'
      elif field_type == "bytes":
          default_value = b""
      elif field_type == "int":
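For context on the compute_cluster_id / nodepool_id changes above, this is roughly the kind of snippet generate_client_script would now emit when both IDs are passed (deployment_id is then dropped). The URL, IDs, and method name are placeholders, not output captured from this release.

# Approximate generated client snippet; URL, IDs, and the "predict" method name are placeholders.
from clarifai.client import Model
from clarifai.runners.utils import data_types

model = Model("https://clarifai.com/<user_id>/<app_id>/models/<model_id>",
 compute_cluster_id = "my-compute-cluster",
 nodepool_id = "my-nodepool", # Only needed for dedicated nodepool
)
response = model.predict(prompt="What is the future of AI?")
print(response)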
clarifai/runners/utils/const.py
@@ -4,7 +4,7 @@ registry = os.environ.get('CLARIFAI_BASE_IMAGE_REGISTRY', 'public.ecr.aws/clarif
 
  GIT_SHA = "42938da8e33b0f37ee7db16b83631da94c2348b9"
 
- AMD_GIT_SHA = "42938da8e33b0f37ee7db16b83631da94c2348b9"
+ AMD_GIT_SHA = "81e942130173f54927e7c9a65aabc7e32780616d"
 
  PYTHON_BASE_IMAGE = registry + '/python-base:{python_version}-' + GIT_SHA
  TORCH_BASE_IMAGE = registry + '/torch:{torch_version}-py{python_version}-{gpu_version}-' + GIT_SHA
@@ -22,7 +22,7 @@ AVAILABLE_PYTHON_IMAGES = ['3.11', '3.12']
 
  DEFAULT_PYTHON_VERSION = '3.12'
 
- DEFAULT_AMD_TORCH_VERSION = '2.8.0.dev20250511+rocm6.4'
+ DEFAULT_AMD_TORCH_VERSION = '2.8.0.dev20250514'
 
  DEFAULT_AMD_GPU_VERSION = 'rocm6.4'
 
clarifai/runners/utils/model_utils.py (new file)
@@ -0,0 +1,135 @@
+ import os
+ import shlex
+ import signal
+ import subprocess
+ import sys
+ import threading
+ import time
+
+ import psutil
+ import requests
+
+ from clarifai.utils.logging import logger
+
+
+ def kill_process_tree(parent_pid, include_parent: bool = True, skip_pid: int = None):
+     """Kill the process and all its child processes.
+
+     Args:
+         parent_pid (int): The PID of the parent process to kill. If None, uses current process.
+         include_parent (bool): Whether to kill the parent process as well.
+         skip_pid (int, optional): PID to skip when killing child processes.
+
+     Raises:
+         psutil.AccessDenied: If process cannot be accessed due to permissions.
+         psutil.NoSuchProcess: If process does not exist.
+     """
+     # Remove sigchld handler to avoid spammy logs.
+     if threading.current_thread() is threading.main_thread():
+         signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+
+     if parent_pid is None:
+         parent_pid = os.getpid()
+         include_parent = False
+
+     try:
+         itself = psutil.Process(parent_pid)
+     except psutil.NoSuchProcess:
+         logger.warning(f"Process {parent_pid} does not exist")
+         return
+     except psutil.AccessDenied:
+         logger.error(f"Cannot access process {parent_pid} due to permissions")
+         raise
+
+     children = itself.children(recursive=True)
+     for child in children:
+         if child.pid == skip_pid:
+             continue
+         try:
+             child.kill()
+         except (psutil.NoSuchProcess, psutil.AccessDenied) as e:
+             logger.warning(f"Failed to kill child process {child.pid}: {e}")
+
+     if include_parent:
+         try:
+             if parent_pid == os.getpid():
+                 itself.kill()
+                 sys.exit(0)
+
+             itself.kill()
+
+             # Sometime processes cannot be killed with SIGKILL (e.g, PID=1 launched by kubernetes),
+             # so we send an additional signal to kill them.
+             if hasattr(signal, 'SIGQUIT'):
+                 itself.send_signal(signal.SIGQUIT)
+         except (psutil.NoSuchProcess, psutil.AccessDenied) as e:
+             logger.warning(f"Failed to kill parent process {parent_pid}: {e}")
+
+
+ def execute_shell_command(
+     command: str,
+ ) -> subprocess.Popen:
+     """Execute a shell command and return its process handle.
+
+     Args:
+         command (str): The shell command to execute.
+
+     Returns:
+         subprocess.Popen: Process handle for the executed command.
+
+     Raises:
+         ValueError: If command is empty or invalid.
+         subprocess.SubprocessError: If command execution fails.
+     """
+     if not command or not isinstance(command, str):
+         raise ValueError("command must be a non-empty string")
+
+     command = command.replace("\\\n", " ").replace("\\", " ")
+     parts = shlex.split(command)
+
+     try:
+         process = subprocess.Popen(parts, text=True, stderr=subprocess.STDOUT)
+
+         return process
+     except subprocess.SubprocessError as e:
+         logger.error(f"Failed to execute command: {e}")
+         raise
+
+
+ def terminate_process(process):
+     """
+     Terminate the process
+     """
+     kill_process_tree(process.pid)
+
+
+ def wait_for_server(base_url: str, timeout: int = None) -> None:
+     """Wait for the server to be ready by polling the /v1/models endpoint.
+
+     Args:
+         base_url: The base URL of the server
+         timeout: Maximum time to wait in seconds. None means wait forever.
+     """
+     start_time = time.perf_counter()
+     while True:
+         try:
+             response = requests.get(
+                 f"{base_url}/v1/models",
+                 headers={"Authorization": "Bearer None"},
+             )
+             if response.status_code == 200:
+                 time.sleep(5)
+                 logger.info(
+                     """\n
+ NOTE: Typically, the server runs in a separate terminal.
+ In this notebook, we run the server and notebook code together, so their outputs are combined.
+ To improve clarity, the server logs are displayed in the original black color, while the notebook outputs are highlighted in blue.
+ We are running those notebooks in a CI parallel environment, so the throughput is not representative of the actual performance.
+ """
+                 )
+                 break
+
+             if timeout and time.perf_counter() - start_time > timeout:
+                 raise TimeoutError("Server did not become ready within timeout period")
+         except requests.exceptions.RequestException:
+             time.sleep(1)
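A short sketch of how these new model_utils helpers fit together: start a local server process, wait for its /v1/models endpoint to respond, then tear the whole process tree down. The command, module name, and port are placeholders, not taken from this diff.

# Hypothetical usage of the helpers above; the command and port are placeholders.
from clarifai.runners.utils.model_utils import (
    execute_shell_command,
    terminate_process,
    wait_for_server,
)

proc = execute_shell_command("python -m my_openai_compatible_server --port 8000")
try:
    wait_for_server("http://localhost:8000", timeout=300)  # polls GET /v1/models until HTTP 200
    # ... interact with the server here ...
finally:
    terminate_process(proc)  # kill_process_tree(proc.pid): kills the process and its children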