nemo-evaluator-launcher 0.1.28 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of nemo-evaluator-launcher might be problematic.

Files changed (60)
  1. nemo_evaluator_launcher/__init__.py +79 -0
  2. nemo_evaluator_launcher/api/__init__.py +24 -0
  3. nemo_evaluator_launcher/api/functional.py +698 -0
  4. nemo_evaluator_launcher/api/types.py +98 -0
  5. nemo_evaluator_launcher/api/utils.py +19 -0
  6. nemo_evaluator_launcher/cli/__init__.py +15 -0
  7. nemo_evaluator_launcher/cli/export.py +267 -0
  8. nemo_evaluator_launcher/cli/info.py +512 -0
  9. nemo_evaluator_launcher/cli/kill.py +41 -0
  10. nemo_evaluator_launcher/cli/ls_runs.py +134 -0
  11. nemo_evaluator_launcher/cli/ls_tasks.py +136 -0
  12. nemo_evaluator_launcher/cli/main.py +226 -0
  13. nemo_evaluator_launcher/cli/run.py +200 -0
  14. nemo_evaluator_launcher/cli/status.py +164 -0
  15. nemo_evaluator_launcher/cli/version.py +55 -0
  16. nemo_evaluator_launcher/common/__init__.py +16 -0
  17. nemo_evaluator_launcher/common/execdb.py +283 -0
  18. nemo_evaluator_launcher/common/helpers.py +366 -0
  19. nemo_evaluator_launcher/common/logging_utils.py +357 -0
  20. nemo_evaluator_launcher/common/mapping.py +295 -0
  21. nemo_evaluator_launcher/common/printing_utils.py +93 -0
  22. nemo_evaluator_launcher/configs/__init__.py +15 -0
  23. nemo_evaluator_launcher/configs/default.yaml +28 -0
  24. nemo_evaluator_launcher/configs/deployment/generic.yaml +33 -0
  25. nemo_evaluator_launcher/configs/deployment/nim.yaml +32 -0
  26. nemo_evaluator_launcher/configs/deployment/none.yaml +16 -0
  27. nemo_evaluator_launcher/configs/deployment/sglang.yaml +38 -0
  28. nemo_evaluator_launcher/configs/deployment/trtllm.yaml +24 -0
  29. nemo_evaluator_launcher/configs/deployment/vllm.yaml +42 -0
  30. nemo_evaluator_launcher/configs/execution/lepton/default.yaml +92 -0
  31. nemo_evaluator_launcher/configs/execution/local.yaml +19 -0
  32. nemo_evaluator_launcher/configs/execution/slurm/default.yaml +34 -0
  33. nemo_evaluator_launcher/executors/__init__.py +22 -0
  34. nemo_evaluator_launcher/executors/base.py +120 -0
  35. nemo_evaluator_launcher/executors/lepton/__init__.py +16 -0
  36. nemo_evaluator_launcher/executors/lepton/deployment_helpers.py +609 -0
  37. nemo_evaluator_launcher/executors/lepton/executor.py +1004 -0
  38. nemo_evaluator_launcher/executors/lepton/job_helpers.py +398 -0
  39. nemo_evaluator_launcher/executors/local/__init__.py +15 -0
  40. nemo_evaluator_launcher/executors/local/executor.py +605 -0
  41. nemo_evaluator_launcher/executors/local/run.template.sh +103 -0
  42. nemo_evaluator_launcher/executors/registry.py +38 -0
  43. nemo_evaluator_launcher/executors/slurm/__init__.py +15 -0
  44. nemo_evaluator_launcher/executors/slurm/executor.py +1147 -0
  45. nemo_evaluator_launcher/exporters/__init__.py +36 -0
  46. nemo_evaluator_launcher/exporters/base.py +121 -0
  47. nemo_evaluator_launcher/exporters/gsheets.py +409 -0
  48. nemo_evaluator_launcher/exporters/local.py +502 -0
  49. nemo_evaluator_launcher/exporters/mlflow.py +619 -0
  50. nemo_evaluator_launcher/exporters/registry.py +40 -0
  51. nemo_evaluator_launcher/exporters/utils.py +624 -0
  52. nemo_evaluator_launcher/exporters/wandb.py +490 -0
  53. nemo_evaluator_launcher/package_info.py +38 -0
  54. nemo_evaluator_launcher/resources/mapping.toml +380 -0
  55. nemo_evaluator_launcher-0.1.28.dist-info/METADATA +494 -0
  56. nemo_evaluator_launcher-0.1.28.dist-info/RECORD +60 -0
  57. nemo_evaluator_launcher-0.1.28.dist-info/WHEEL +5 -0
  58. nemo_evaluator_launcher-0.1.28.dist-info/entry_points.txt +3 -0
  59. nemo_evaluator_launcher-0.1.28.dist-info/licenses/LICENSE +451 -0
  60. nemo_evaluator_launcher-0.1.28.dist-info/top_level.txt +1 -0
nemo_evaluator_launcher/exporters/wandb.py
@@ -0,0 +1,490 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+ """Weights & Biases results exporter."""
+
+ import os
+ import shutil
+ import tempfile
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional
+
+ import yaml
+
+ try:
+     import wandb
+
+     WANDB_AVAILABLE = True
+ except ImportError:
+     WANDB_AVAILABLE = False
+
+ from nemo_evaluator_launcher.common.execdb import JobData
+ from nemo_evaluator_launcher.common.logging_utils import logger
+ from nemo_evaluator_launcher.exporters.base import BaseExporter, ExportResult
+ from nemo_evaluator_launcher.exporters.local import LocalExporter
+ from nemo_evaluator_launcher.exporters.registry import register_exporter
+ from nemo_evaluator_launcher.exporters.utils import (
+     extract_accuracy_metrics,
+     extract_exporter_config,
+     get_artifact_root,
+     get_available_artifacts,
+     get_benchmark_info,
+     get_task_name,
+ )
+
+
+ @register_exporter("wandb")
+ class WandBExporter(BaseExporter):
+     """Export accuracy metrics to W&B."""
+
+     def supports_executor(self, executor_type: str) -> bool:
+         return True
+
+     def is_available(self) -> bool:
+         return WANDB_AVAILABLE
+
+     def export_job(self, job_data: JobData) -> ExportResult:
+         """Export single job - same logic as invocation but for one job."""
+         if not self.is_available():
+             return ExportResult(
+                 success=False, dest="wandb", message="wandb package not installed"
+             )
+
+         try:
+             wandb_config = extract_exporter_config(job_data, "wandb", self.config)
+             log_mode = wandb_config.get(
+                 "log_mode", "per_task"
+             )  # Default per_task for immediate export
+
+             # Stage artifacts locally if remote_ssh (e.g., Slurm), so we can extract metrics
+             staged_base_dir = None
+             try:
+                 paths = self.get_job_paths(job_data)
+                 if paths.get("storage_type") == "remote_ssh":
+                     tmp_stage = Path(tempfile.mkdtemp(prefix="wandb_stage_"))
+                     LocalExporter(
+                         {
+                             "output_dir": str(tmp_stage),
+                             "copy_logs": wandb_config.get("log_logs", False),
+                             "only_required": wandb_config.get("only_required", True),
+                         }
+                     ).export_job(job_data)
+                     staged_base_dir = (
+                         tmp_stage / job_data.invocation_id / job_data.job_id
+                     )
+             except Exception as e:
+                 logger.warning(f"W&B: staging failed for {job_data.job_id}: {e}")
+
+             # Metrics (prefer staged if available)
+             log_metrics = wandb_config.get("log_metrics", [])
+             if staged_base_dir and (staged_base_dir / "artifacts").exists():
+                 metrics = extract_accuracy_metrics(
+                     job_data,
+                     lambda _: {
+                         "artifacts_dir": staged_base_dir / "artifacts",
+                         "storage_type": "local_filesystem",
+                     },
+                     log_metrics,
+                 )
+             else:
+                 metrics = extract_accuracy_metrics(
+                     job_data, self.get_job_paths, log_metrics
+                 )
+
+             if not metrics:
+                 return ExportResult(
+                     success=False, dest="wandb", message="No metrics found"
+                 )
+
+             # Choose either jobId or invocationId based on log_mode
+             if log_mode == "per_task":
+                 # Create separate run per task
+                 task_name = get_task_name(job_data)
+                 identifier = f"{job_data.invocation_id}-{task_name}"
+                 should_resume = False
+                 run_id = None
+             elif log_mode == "multi_task":
+                 # Append to shared run by invocation_id
+                 identifier = job_data.invocation_id
+                 should_resume, run_id = self._check_existing_run(
+                     identifier, job_data, wandb_config
+                 )
+             result = self._create_wandb_run(
+                 identifier, wandb_config, metrics, job_data, should_resume, run_id
+             )
+             return ExportResult(
+                 success=True, dest="wandb", message="Export completed", metadata=result
+             )
+
+         except Exception as e:
+             logger.error(f"W&B export failed: {e}")
+             return ExportResult(
+                 success=False, dest="wandb", message=f"Failed: {str(e)}"
+             )
+
+     def export_invocation(self, invocation_id: str) -> Dict[str, Any]:
+         """Export all jobs in invocation as one W&B run."""
+         if not self.is_available():
+             return {"success": False, "error": "wandb package not installed"}
+
+         jobs = self.db.get_jobs(invocation_id)
+         if not jobs:
+             return {
+                 "success": False,
+                 "error": f"No jobs found for invocation {invocation_id}",
+             }
+
+         try:
+             first_job = list(jobs.values())[0]
+             wandb_config = extract_exporter_config(first_job, "wandb", self.config)
+
+             all_metrics = {}
+             for _, job_data in jobs.items():
+                 log_metrics = wandb_config.get("log_metrics", [])
+                 job_metrics = extract_accuracy_metrics(
+                     job_data, self.get_job_paths, log_metrics
+                 )
+                 all_metrics.update(job_metrics)
+
+             if not all_metrics:
+                 return {
+                     "success": False,
+                     "error": "No accuracy metrics found in any job",
+                 }
+
+             should_resume, run_id = self._check_existing_run(
+                 invocation_id, first_job, wandb_config
+             )
+
+             result = self._create_wandb_run(
+                 invocation_id,
+                 wandb_config,
+                 all_metrics,
+                 first_job,
+                 should_resume,
+                 run_id,
+             )
+
+             return {
+                 "success": True,
+                 "invocation_id": invocation_id,
+                 "jobs": {
+                     job_id: {
+                         "success": True,
+                         "message": "Contributed to invocation run",
+                     }
+                     for job_id in jobs.keys()
+                 },
+                 "metadata": result,
+             }
+
+         except Exception as e:
+             logger.error(f"W&B export failed for invocation {invocation_id}: {e}")
+             return {"success": False, "error": f"W&B export failed: {str(e)}"}
+
+     def _log_artifacts(
+         self,
+         job_data: JobData,
+         wandb_config: Dict[str, Any],
+         artifact,
+         register_staging_dir=None,
+     ) -> List[str]:
+         """Log evaluation artifacts to WandB using LocalExporter for staging."""
+         if not wandb_config.get("log_artifacts", True):
+             return []
+         try:
+             temp_dir = tempfile.mkdtemp(prefix="wandb_artifacts_")
+             if callable(register_staging_dir):
+                 register_staging_dir(temp_dir)
+             local_exporter = LocalExporter(
+                 {
+                     "output_dir": temp_dir,
+                     "copy_logs": wandb_config.get(
+                         "log_logs", wandb_config.get("copy_logs", False)
+                     ),
+                     "only_required": wandb_config.get("only_required", True),
+                     "format": wandb_config.get("format"),
+                     "log_metrics": wandb_config.get("log_metrics", []),
+                     "output_filename": wandb_config.get("output_filename"),
+                 }
+             )
+             local_result = local_exporter.export_job(job_data)
+
+             if not local_result.success:
+                 logger.error(f"Failed to download artifacts: {local_result.message}")
+                 return []
+
+             base_dir = Path(local_result.dest)
+             artifacts_dir = base_dir / "artifacts"
+             logs_dir = base_dir / "logs"
+             logged_names: list[str] = []
+
+             artifact_root = get_artifact_root(job_data)  # "<harness>.<benchmark>"
+
+             # Add config file only when artifacts logging is enabled
+             if wandb_config.get("log_artifacts", True):
+                 cfg_added = False
+                 for fname in ("config.yml", "run_config.yml"):
+                     p = artifacts_dir / fname
+                     if p.exists():
+                         artifact.add_file(str(p), name=f"{artifact_root}/{fname}")
+                         logged_names.append(fname)
+                         cfg_added = True
+                         break
+                 if not cfg_added:
+                     with tempfile.NamedTemporaryFile(
+                         "w", suffix=".yaml", delete=False
+                     ) as tmp_cfg:
+                         yaml.dump(
+                             job_data.config or {},
+                             tmp_cfg,
+                             default_flow_style=False,
+                             sort_keys=False,
+                         )
+                         cfg_path = tmp_cfg.name
+                     artifact.add_file(cfg_path, name=f"{artifact_root}/config.yaml")
+                     os.unlink(cfg_path)
+                     logged_names.append("config.yaml")
+
+             files_to_upload: list[Path] = []
+             if wandb_config.get("only_required", True):
+                 for fname in get_available_artifacts(artifacts_dir):
+                     p = artifacts_dir / fname
+                     if p.exists():
+                         files_to_upload.append(p)
+             else:
+                 for p in artifacts_dir.iterdir():
+                     if p.is_file():
+                         files_to_upload.append(p)
+
+             for fpath in files_to_upload:
+                 rel = fpath.relative_to(artifacts_dir).as_posix()
+                 artifact.add_file(str(fpath), name=f"{artifact_root}/artifacts/{rel}")
+                 logged_names.append(rel)
+
+             if wandb_config.get("log_logs", False) and logs_dir.exists():
+                 for p in logs_dir.rglob("*"):
+                     if p.is_file():
+                         rel = p.relative_to(logs_dir).as_posix()
+                         artifact.add_file(str(p), name=f"{artifact_root}/logs/{rel}")
+                         logged_names.append(f"logs/{rel}")
+
+             return logged_names
+         except Exception as e:
+             logger.error(f"Error logging artifacts: {e}")
+             return []
+
+     def _check_existing_run(
+         self, identifier: str, job_data: JobData, config: Dict[str, Any]
+     ) -> tuple[bool, Optional[str]]:
+         """Check if run exists based on webhook metadata then name patterns."""
+         try:
+             import wandb
+
+             api = wandb.Api()
+             entity = config.get("entity")
+             project = config.get("project")
+             if not (entity and project):
+                 return False, None
+
+             # Check webhook metadata for run_id first
+             webhook_meta = job_data.data.get("webhook_metadata", {})
+             if (
+                 webhook_meta.get("webhook_source") == "wandb"
+                 and config.get("triggered_by_webhook")
+                 and "run_id" in webhook_meta
+             ):
+                 try:
+                     # Verify the run actually exists
+                     run = api.run(f"{entity}/{project}/{webhook_meta['run_id']}")
+                     return True, run.id
+                 except Exception:
+                     pass
+
+             # Check explicit name first
+             if config.get("name"):
+                 runs = api.runs(f"{entity}/{project}")
+                 for run in runs:
+                     if run.display_name == config["name"]:
+                         return True, run.id
+
+             # Check default pattern
+             default_run_name = f"eval-{identifier}"
+             runs = api.runs(f"{entity}/{project}")
+             for run in runs:
+                 if run.display_name == default_run_name:
+                     return True, run.id
+
+             return False, None
+         except Exception:
+             return False, None
+
+     def _create_wandb_run(
+         self,
+         identifier: str,
+         config: Dict[str, Any],
+         metrics: Dict[str, float],
+         job_data: JobData,
+         should_resume: bool,
+         existing_run_id: str,
+     ) -> Dict[str, Any]:
+         """Create or resume W&B run for single job."""
+         log_mode = config.get("log_mode", "per_task")
+         task_name = get_task_name(job_data)
+         bench_info = get_benchmark_info(job_data)
+         benchmark = bench_info.get("benchmark", task_name)
+         harness = bench_info.get("harness", "unknown")
+
+         if config.get("name"):
+             run_name = config["name"]
+         else:
+             run_name = (
+                 f"eval-{job_data.invocation_id}-{benchmark}"
+                 if log_mode == "per_task"
+                 else f"eval-{identifier}"
+             )
+
+         run_args = {
+             "entity": config.get("entity"),
+             "project": config.get("project"),
+             "name": run_name,
+             "group": config.get("group", job_data.invocation_id),
+             "job_type": config.get("job_type", "evaluation"),
+             "tags": config.get("tags"),
+             "notes": config.get("description"),
+         }
+
+         # resume for multi_task runs
+         if log_mode == "multi_task":
+             stable_id = config.get("run_id") or identifier  # invocation_id
+             run_args["id"] = stable_id
+             run_args["resume"] = "allow"
+         elif should_resume:
+             run_args["id"] = existing_run_id
+             run_args["resume"] = "allow"
+
+         # Config metadata
+         exec_type = (job_data.config or {}).get("execution", {}).get(
+             "type"
+         ) or job_data.executor
+         run_config = {
+             "invocation_id": job_data.invocation_id,
+             "executor": exec_type,
+         }
+
+         if log_mode == "per_task":
+             run_config["job_id"] = job_data.job_id
+             run_config["harness"] = harness
+             run_config["benchmark"] = benchmark
+
+         if config.get("triggered_by_webhook"):
+             run_config.update(
+                 {
+                     "webhook_triggered": True,
+                     "webhook_source": config.get("webhook_source"),
+                     "source_artifact": config.get("source_artifact"),
+                     "config_source": config.get("config_source"),
+                 }
+             )
+
+         run_config.update(config.get("extra_metadata", {}))
+         run_args["config"] = run_config
+
+         # Initialize
+         run = wandb.init(**{k: v for k, v in run_args.items() if v is not None})
+
+         # Track staging dirs for this run
+         staging_dirs: List[str] = []
+
+         def register_staging_dir(path: str) -> None:
+             if path and os.path.isdir(path):
+                 staging_dirs.append(path)
+
+         # In multi_task, aggregate lists after init (no overwrite)
+         if log_mode == "multi_task":
+             try:
+                 benchmarks = list(run.config.get("benchmarks", []))
+                 if benchmark not in benchmarks:
+                     benchmarks.append(benchmark)
+                 harnesses = list(run.config.get("harnesses", []))
+                 if harness not in harnesses:
+                     harnesses.append(harness)
+                 run.config.update(
+                     {"benchmarks": benchmarks, "harnesses": harnesses},
+                     allow_val_change=True,
+                 )
+             except Exception:
+                 pass
+
+         # Artifact naming
+         artifact_name = (
+             f"{job_data.invocation_id}_{benchmark}"
+             if log_mode == "per_task"
+             else job_data.invocation_id
+         )
+         artifact = wandb.Artifact(
+             name=artifact_name,
+             type="evaluation_result",
+             description="Evaluation results",
+             metadata={
+                 "invocation_id": job_data.invocation_id,
+                 "task": task_name,
+                 "benchmark": benchmark,
+                 "harness": harness,
+             },
+         )
+
+         logged_artifacts = self._log_artifacts(
+             job_data, config, artifact, register_staging_dir=register_staging_dir
+         )
+
+         try:
+             run.log_artifact(artifact)
+             # charts for each logged metric
+             try:
+                 for k in metrics.keys():
+                     run.define_metric(k, summary="last")
+             except Exception:
+                 pass
+
+             # Log metrics with per-task step
+             try:
+                 step_idx = int(job_data.job_id.split(".")[-1])
+             except Exception:
+                 step_idx = 0
+             run.log(metrics, step=step_idx)
+
+             # metrics summary
+             try:
+                 run.summary.update(metrics)
+             except Exception:
+                 pass
+         finally:
+             for d in staging_dirs:
+                 try:
+                     shutil.rmtree(d, ignore_errors=True)
+                 except Exception:
+                     pass
+             try:
+                 run.finish()
+             except Exception:
+                 pass
+
+         return {
+             "run_id": run.id,
+             "run_url": run.url,
+             "metrics_logged": len(metrics),
+             "artifacts_logged": len(logged_artifacts),
+         }
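
For orientation, a minimal usage sketch of the exporter defined above. The values are hypothetical placeholders and the dict-based constructor is inferred from how this module instantiates LocalExporter; the launcher normally builds the exporter and the JobData record itself through its export command, so treat this as an illustration rather than the canonical API:

    # Hand-built invocation of the W&B exporter (hypothetical values).
    from nemo_evaluator_launcher.exporters.wandb import WandBExporter

    exporter = WandBExporter(
        {
            "entity": "my-team",      # W&B entity (placeholder)
            "project": "llm-evals",   # W&B project (placeholder)
            "log_mode": "per_task",   # or "multi_task" to append all tasks to one shared run
            "log_artifacts": True,    # upload evaluation artifacts as a W&B Artifact
            "log_logs": False,        # also upload job logs when True
        }
    )
    result = exporter.export_job(job_data)  # job_data: a JobData record from the exec DB
    print(result.success, result.message, result.metadata)
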
nemo_evaluator_launcher/package_info.py
@@ -0,0 +1,38 @@
+ # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ # Below is the _next_ version that will be published, not the currently published one.
+ MAJOR = 0
+ MINOR = 1
+ PATCH = 28
+ PRE_RELEASE = ""
+
+ # Use the following formatting: (major, minor, patch, pre-release)
+ VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
+
+ __shortversion__ = ".".join(map(str, VERSION[:3]))
+ __version__ = ".".join(map(str, VERSION[:3])) + "".join(VERSION[3:])
+
+ # BEGIN(if-changed): check the pyproject.toml, too
+ __package_name__ = "nemo_evaluator_launcher"
+ __contact_names__ = "NVIDIA"
+ __contact_emails__ = "nemo-toolkit@nvidia.com"
+ __homepage__ = "https://github.com/NVIDIA-NeMo/Eval"
+ __repository_url__ = "https://github.com/NVIDIA-NeMo/Eval"
+ __download_url__ = "https://github.com/NVIDIA-NeMo/Evaluator/releases"
+ __description__ = "Launcher for the evaluations provided by NeMo Evaluator containers with different runtime backends"
+ __license__ = "Apache2"
+ __keywords__ = "deep learning, evaluations, machine learning, gpu, NLP, pytorch, torch"
+ # END(if-changed)
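
As a quick worked check of the version assembly above (values copied from this file; the "rc1" suffix is a hypothetical pre-release tag):

    # VERSION = (0, 1, 28, "")
    ".".join(map(str, VERSION[:3]))                          # "0.1.28"  -> __shortversion__
    ".".join(map(str, VERSION[:3])) + "".join(VERSION[3:])   # "0.1.28"  -> __version__
    # With PRE_RELEASE = "rc1", the same expression would yield "0.1.28rc1".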