torchx-nightly 2025.10.5__py3-none-any.whl → 2025.10.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


torchx/components/dist.py CHANGED
@@ -92,6 +92,7 @@ def spmd(
     h: str = "gpu.small",
     j: str = "1x1",
     env: Optional[Dict[str, str]] = None,
+    metadata: Optional[Dict[str, str]] = None,
     max_retries: int = 0,
     mounts: Optional[List[str]] = None,
     debug: bool = False,
@@ -131,6 +132,7 @@ def spmd(
         h: the type of host to run on (e.g. aws_p4d.24xlarge). Must be one of the registered named resources
         j: {nnodes}x{nproc_per_node}. For GPU hosts omitting nproc_per_node will infer it from the GPU count on the host
         env: environment variables to be passed to the run (e.g. ENV1=v1,ENV2=v2,ENV3=v3)
+        metadata: metadata to be passed to the scheduler (e.g. KEY1=v1,KEY2=v2,KEY3=v3)
         max_retries: the number of scheduler retries allowed
         mounts: (for docker based runs only) mounts to mount into the worker environment/container
             (ex. type=<bind/volume>,src=/host,dst=/job[,readonly]).
@@ -150,6 +152,7 @@ def spmd(
         h=h,
         j=str(StructuredJArgument.parse_from(h, j)),
         env=env,
+        metadata=metadata,
         max_retries=max_retries,
         mounts=mounts,
         debug=debug,
@@ -168,6 +171,7 @@ def ddp(
     memMB: int = 1024,
     j: str = "1x2",
     env: Optional[Dict[str, str]] = None,
+    metadata: Optional[Dict[str, str]] = None,
     max_retries: int = 0,
     rdzv_port: int = 29500,
     rdzv_backend: str = "c10d",
@@ -201,6 +205,7 @@ def ddp(
         h: a registered named resource (if specified takes precedence over cpu, gpu, memMB)
         j: [{min_nnodes}:]{nnodes}x{nproc_per_node}, for gpu hosts, nproc_per_node must not exceed num gpus
         env: environment varibles to be passed to the run (e.g. ENV1=v1,ENV2=v2,ENV3=v3)
+        metadata: metadata to be passed to the scheduler (e.g. KEY1=v1,KEY2=v2,KEY3=v3)
         max_retries: the number of scheduler retries allowed
         rdzv_port: the port on rank0's host to use for hosting the c10d store used for rendezvous.
             Only takes effect when running multi-node. When running single node, this parameter
@@ -237,8 +242,8 @@ def ddp(
     # use $$ in the prefix to escape the '$' literal (rather than a string Template substitution argument)
     rdzv_endpoint = _noquote(f"$${{{macros.rank0_env}:=localhost}}:{rdzv_port}")

-    if env is None:
-        env = {}
+    env = env or {}
+    metadata = metadata or {}

     argname = StructuredNameArgument.parse_from(
         name=name,
@@ -299,6 +304,7 @@ def ddp(
                 mounts=specs.parse_mounts(mounts) if mounts else [],
             )
         ],
+        metadata=metadata,
     )

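
Both spmd and ddp now accept a metadata mapping that is forwarded to the resulting AppDef. The sketch below shows the assumed usage; the script name, the j value, and the metadata keys are placeholders, not values taken from this diff.

# Minimal sketch (assumed usage): attach scheduler metadata when materializing
# the ddp component. "train.py" and the metadata keys are illustrative only.
from torchx.components import dist

app = dist.ddp(
    script="train.py",
    j="2x4",                                # 2 nodes x 4 procs per node
    env={"ENV1": "v1"},
    metadata={"KEY1": "v1", "KEY2": "v2"},  # forwarded to AppDef.metadata per this diff
)
print(app.metadata)
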
torchx/runner/api.py CHANGED
@@ -426,26 +426,42 @@ class Runner:

         sched._pre_build_validate(app, scheduler, resolved_cfg)

-        if workspace and isinstance(sched, WorkspaceMixin):
-            role = app.roles[0]
-            old_img = role.image
-
-            logger.info(f"Checking for changes in workspace `{workspace}`...")
-            logger.info(
-                'To disable workspaces pass: --workspace="" from CLI or workspace=None programmatically.'
-            )
-            sched.build_workspace_and_update_role2(role, workspace, resolved_cfg)
-
-            if old_img != role.image:
-                logger.info(
-                    f"Built new image `{role.image}` based on original image `{old_img}`"
-                    f" and changes in workspace `{workspace}` for role[0]={role.name}."
-                )
-            else:
-                logger.info(
-                    f"Reusing original image `{old_img}` for role[0]={role.name}."
-                    " Either a patch was built or no changes to workspace was detected."
-                )
+        if isinstance(sched, WorkspaceMixin):
+            for i, role in enumerate(app.roles):
+                role_workspace = role.workspace
+
+                if i == 0 and workspace:
+                    # NOTE: torchx originally took workspace as a runner arg and only applied the workspace to role[0]
+                    #  later, torchx added support for the workspace attr in Role
+                    #  for BC, give precedence to the workspace argument over the workspace attr for role[0]
+                    if role_workspace:
+                        logger.info(
+                            f"Using workspace={workspace} over role[{i}].workspace={role_workspace} for role[{i}]={role.name}."
+                            " To use the role's workspace attr pass: --workspace='' from CLI or workspace=None programmatically."  # noqa: B950
+                        )
+                    role_workspace = workspace
+
+                if role_workspace:
+                    old_img = role.image
+                    logger.info(
+                        f"Checking for changes in workspace `{role_workspace}` for role[{i}]={role.name}..."
+                    )
+                    # TODO kiuk@ once we deprecate the `workspace` argument in runner APIs we can simplify the signature of
+                    #  build_workspace_and_update_role2() to just taking the role and resolved_cfg
+                    sched.build_workspace_and_update_role2(
+                        role, role_workspace, resolved_cfg
+                    )
+
+                    if old_img != role.image:
+                        logger.info(
+                            f"Built new image `{role.image}` based on original image `{old_img}`"
+                            f" and changes in workspace `{role_workspace}` for role[{i}]={role.name}."
+                        )
+                    else:
+                        logger.info(
+                            f"Reusing original image `{old_img}` for role[{i}]={role.name}."
+                            " Either a patch was built or no changes to workspace was detected."
+                        )

         sched._validate(app, scheduler, resolved_cfg)
         dryrun_info = sched.submit_dryrun(app, resolved_cfg)
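
The rewritten block changes workspace resolution from "role[0] only" to per-role: each role builds from its own workspace attribute, except that the runner-level workspace argument still wins for role[0] for backwards compatibility, and a role is only rebuilt when it ends up with a non-empty workspace. The standalone sketch below restates that precedence rule; the helper name is invented for illustration and is not part of torchx.

# Hypothetical helper (not in torchx) mirroring the precedence implemented above.
from typing import Optional


def resolve_role_workspace(
    role_index: int,
    role_workspace: Optional[str],
    runner_workspace: Optional[str],
) -> Optional[str]:
    if role_index == 0 and runner_workspace:
        return runner_workspace  # BC: runner arg overrides role[0].workspace
    return role_workspace        # every other role uses its own attribute


assert resolve_role_workspace(0, "~/proj", "~/override") == "~/override"
assert resolve_role_workspace(1, "~/proj", "~/override") == "~/proj"
assert resolve_role_workspace(0, None, None) is None  # no workspace -> no rebuild
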

torchx/schedulers/__init__.py CHANGED
@@ -49,15 +49,14 @@ def get_scheduler_factories(
     The first scheduler in the dictionary is used as the default scheduler.
     """

-    default_schedulers: dict[str, SchedulerFactory] = {}
-    for scheduler, path in DEFAULT_SCHEDULER_MODULES.items():
-        default_schedulers[scheduler] = _defer_load_scheduler(path)
-
-    return load_group(
-        group,
-        default=default_schedulers,
-        skip_defaults=skip_defaults,
-    )
+    if skip_defaults:
+        default_schedulers = {}
+    else:
+        default_schedulers: dict[str, SchedulerFactory] = {}
+        for scheduler, path in DEFAULT_SCHEDULER_MODULES.items():
+            default_schedulers[scheduler] = _defer_load_scheduler(path)
+
+    return load_group(group, default=default_schedulers)


 def get_default_scheduler_name() -> str:
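
With skip_defaults removed from load_group (see the torchx/util/entrypoints.py hunks below), the skip decision moves into get_scheduler_factories, and load_group simply falls back to the supplied defaults whenever the entry-point group is empty. A self-contained sketch of that simplified contract, using the Python 3.10+ entry-point selection API and an illustrative group name:

# Standalone sketch of the simplified contract; not the torchx implementation.
from importlib import metadata
from typing import Any, Dict, Optional


def load_group_sketch(group: str, default: Optional[Dict[str, Any]] = None):
    eps = tuple(metadata.entry_points(group=group))  # Python 3.10+ selection API
    if not eps:
        # Unconditional fallback: callers such as get_scheduler_factories now
        # decide up front whether defaults should be supplied at all.
        return default
    return {ep.name: ep.load for ep in eps}


# With no entry points registered for the made-up group, the defaults come back as-is.
print(load_group_sketch("example.nonexistent.group", default={"hello": print}))
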
torchx/specs/__init__.py CHANGED
@@ -45,6 +45,7 @@ from torchx.specs.api import (
     UnknownAppException,
     UnknownSchedulerException,
     VolumeMount,
+    Workspace,
 )
 from torchx.specs.builders import make_app_handle, materialize_appdef, parse_mounts

@@ -236,4 +237,6 @@ __all__ = [
     "torchx_run_args_from_json",
     "TorchXRunArgs",
     "ALL",
+    "TORCHX_HOME",
+    "Workspace",
 ]
torchx/specs/api.py CHANGED
@@ -350,6 +350,78 @@ class DeviceMount:
     permissions: str = "rwm"


+@dataclass
+class Workspace:
+    """
+    Specifies a local "workspace" (a set of directories). Workspaces are ad-hoc built
+    into an (usually ephemeral) image. This effectively mirrors the local code changes
+    at job submission time.
+
+    For example:
+
+    1. ``projects={"~/github/torch": "torch"}`` copies ``~/github/torch/**`` into ``$REMOTE_WORKSPACE_ROOT/torch/**``
+    2. ``projects={"~/github/torch": ""}`` copies ``~/github/torch/**`` into ``$REMOTE_WORKSPACE_ROOT/**``
+
+    The exact location of ``$REMOTE_WORKSPACE_ROOT`` is implementation dependent and varies between
+    different implementations of :py:class:`~torchx.workspace.api.WorkspaceMixin`.
+    Check the scheduler documentation for details on which workspace it supports.
+
+    Note: ``projects`` maps the location of the local project to a sub-directory in the remote workspace root directory.
+    Typically the local project location is a directory path (e.g. ``/home/foo/github/torch``).
+
+
+    Attributes:
+        projects: mapping of local project to the sub-dir in the remote workspace dir.
+    """
+
+    projects: dict[str, str]
+
+    def __bool__(self) -> bool:
+        """False if no projects mapping. Lets us use workspace object in an if-statement"""
+        return bool(self.projects)
+
+    def is_unmapped_single_project(self) -> bool:
+        """
+        Returns ``True`` if this workspace only has 1 project
+        and its target mapping is an empty string.
+        """
+        return len(self.projects) == 1 and not next(iter(self.projects.values()))
+
+    @staticmethod
+    def from_str(workspace: str | None) -> "Workspace":
+        import yaml
+
+        if not workspace:
+            return Workspace({})
+
+        projects = yaml.safe_load(workspace)
+        if isinstance(projects, str):  # single project workspace
+            projects = {projects: ""}
+        else:  # multi-project workspace
+            # Replace None mappings with "" (empty string)
+            projects = {k: ("" if v is None else v) for k, v in projects.items()}
+
+        return Workspace(projects)
+
+    def __str__(self) -> str:
+        """
+        Returns a string representation of the Workspace by concatenating
+        the project mappings using ';' as a delimiter and ':' between key and value.
+        If the single-project workspace with no target mapping, then simply
+        returns the src (local project dir)
+
+        NOTE: meant to be used for logging purposes not serde.
+        Therefore not symmetric with :py:func:`Workspace.from_str`.
+
+        """
+        if self.is_unmapped_single_project():
+            return next(iter(self.projects))
+        else:
+            return ";".join(
+                k if not v else f"{k}:{v}" for k, v in self.projects.items()
+            )
+
+
 @dataclass
 class Role:
     """
@@ -402,6 +474,10 @@ class Role:
         metadata: Free form information that is associated with the role, for example
             scheduler specific data. The key should follow the pattern: ``$scheduler.$key``
         mounts: a list of mounts on the machine
+        workspace: local project directories to be mirrored on the remote job.
+            NOTE: The workspace argument provided to the :py:class:`~torchx.runner.api.Runner` APIs
+            only takes effect on ``appdef.role[0]`` and overrides this attribute.
+
     """

     name: str
@@ -417,9 +493,10 @@ class Role:
     resource: Resource = field(default_factory=_null_resource)
     port_map: Dict[str, int] = field(default_factory=dict)
     metadata: Dict[str, Any] = field(default_factory=dict)
-    mounts: List[Union[BindMount, VolumeMount, DeviceMount]] = field(
-        default_factory=list
-    )
+    mounts: List[BindMount | VolumeMount | DeviceMount] = field(default_factory=list)
+    workspace: Workspace | None = None
+
+    # DEPRECATED DO NOT SET, WILL BE REMOVED SOON
     overrides: Dict[str, Any] = field(default_factory=dict)

     # pyre-ignore
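
Taken together with the torchx.specs export above, the new Workspace dataclass and the Role.workspace attribute can be exercised roughly as follows. This is an assumed usage sketch: the project paths, role name, and image are placeholders.

# Assumed usage of the newly exported Workspace dataclass and Role.workspace.
from torchx import specs

ws = specs.Workspace.from_str("~/github/torch")  # single, unmapped project
assert ws.is_unmapped_single_project()
assert str(ws) == "~/github/torch"

multi = specs.Workspace({"~/github/torch": "torch", "~/github/examples": ""})
print(str(multi))  # "~/github/torch:torch;~/github/examples"

role = specs.Role(
    name="trainer",
    image="ghcr.io/example/train:latest",
    entrypoint="python",
    args=["-m", "train"],
    workspace=ws,  # mirrored into the image when the scheduler supports workspaces
)
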

torchx/util/entrypoints.py CHANGED
@@ -69,9 +69,7 @@ def _defer_load_ep(ep: EntryPoint) -> object:
     return run


-def load_group(
-    group: str, default: Optional[Dict[str, Any]] = None, skip_defaults: bool = False
-):
+def load_group(group: str, default: Optional[Dict[str, Any]] = None):
     """
     Loads all the entry points specified by ``group`` and returns
     the entry points as a map of ``name (str) -> deferred_load_fn``.
@@ -90,7 +88,6 @@ load_group(
     1. ``load_group("foo")["bar"]("baz")`` -> equivalent to calling ``this.is.a_fn("baz")``
     1. ``load_group("food")`` -> ``None``
     1. ``load_group("food", default={"hello": this.is.c_fn})["hello"]("world")`` -> equivalent to calling ``this.is.c_fn("world")``
-    1. ``load_group("food", default={"hello": this.is.c_fn}, skip_defaults=True)`` -> ``None``


     If the entrypoint is a module (versus a function as shown above), then calling the ``deferred_load_fn``
@@ -115,8 +112,6 @@ load_group(
     entrypoints = metadata.entry_points().get(group, ())

     if len(entrypoints) == 0:
-        if skip_defaults:
-            return None
         return default

     eps = {}
torchx/workspace/api.py CHANGED
@@ -26,7 +26,7 @@ from typing import (
     Union,
 )

-from torchx.specs import AppDef, CfgVal, Role, runopts
+from torchx.specs import AppDef, CfgVal, Role, runopts, Workspace

 if TYPE_CHECKING:
     from fsspec import AbstractFileSystem
@@ -88,71 +88,6 @@ class WorkspaceBuilder(Generic[PackageType, WorkspaceConfigType]):
         pass


-@dataclass
-class Workspace:
-    """
-    Specifies a local "workspace" (a set of directories). Workspaces are ad-hoc built
-    into an (usually ephemeral) image. This effectively mirrors the local code changes
-    at job submission time.
-
-    For example:
-
-    1. ``projects={"~/github/torch": "torch"}`` copies ``~/github/torch/**`` into ``$REMOTE_WORKSPACE_ROOT/torch/**``
-    2. ``projects={"~/github/torch": ""}`` copies ``~/github/torch/**`` into ``$REMOTE_WORKSPACE_ROOT/**``
-
-    The exact location of ``$REMOTE_WORKSPACE_ROOT`` is implementation dependent and varies between
-    different implementations of :py:class:`~torchx.workspace.api.WorkspaceMixin`.
-    Check the scheduler documentation for details on which workspace it supports.
-
-    Note: ``projects`` maps the location of the local project to a sub-directory in the remote workspace root directory.
-    Typically the local project location is a directory path (e.g. ``/home/foo/github/torch``).
-
-
-    Attributes:
-        projects: mapping of local project to the sub-dir in the remote workspace dir.
-    """
-
-    projects: dict[str, str]
-
-    def is_unmapped_single_project(self) -> bool:
-        """
-        Returns ``True`` if this workspace only has 1 project
-        and its target mapping is an empty string.
-        """
-        return len(self.projects) == 1 and not next(iter(self.projects.values()))
-
-    @staticmethod
-    def from_str(workspace: str) -> "Workspace":
-        import yaml
-
-        projects = yaml.safe_load(workspace)
-        if isinstance(projects, str):  # single project workspace
-            projects = {projects: ""}
-        else:  # multi-project workspace
-            # Replace None mappings with "" (empty string)
-            projects = {k: ("" if v is None else v) for k, v in projects.items()}
-
-        return Workspace(projects)
-
-    def __str__(self) -> str:
-        """
-        Returns a string representation of the Workspace by concatenating
-        the project mappings using ';' as a delimiter and ':' between key and value.
-        If the single-project workspace with no target mapping, then simply
-        returns the src (local project dir)
-
-        NOTE: meant to be used for logging purposes not serde.
-        Therefore not symmetric with :py:func:`Workspace.from_str`.
-
-        """
-        if self.is_unmapped_single_project():
-            return next(iter(self.projects))
-        else:
-            return ";".join(
-                k if not v else f"{k}:{v}" for k, v in self.projects.items()
-            )
-
-
 class WorkspaceMixin(abc.ABC, Generic[T]):
     """
     Note: (Prototype) this interface may change without notice!
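
Since the dataclass now lives in torchx.specs and is only re-imported here, the canonical import path changes while the old one should keep resolving through this module's namespace; a minimal sketch under that assumption:

# Canonical import after the move; the old path is assumed to still resolve
# because torchx.workspace.api re-imports Workspace from torchx.specs (see above).
from torchx.specs import Workspace
from torchx.workspace.api import Workspace as LegacyAlias

assert Workspace is LegacyAlias
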

torchx_nightly-2025.10.5.dist-info/METADATA → torchx_nightly-2025.10.8.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: torchx-nightly
-Version: 2025.10.5
+Version: 2025.10.8
 Summary: TorchX SDK and Components
 Home-page: https://github.com/meta-pytorch/torchx
 Author: TorchX Devs

torchx_nightly-2025.10.5.dist-info/RECORD → torchx_nightly-2025.10.8.dist-info/RECORD CHANGED
@@ -24,7 +24,7 @@ torchx/cli/colors.py,sha256=yLMes7e_UoLAfhxE0W6edhc58t83UHAlnCN2ANPeuXw,568
 torchx/cli/main.py,sha256=1Jf2cnO6Y2W69Adt88avmNPVrL6ZR4Hkff6GVB4293k,3484
 torchx/components/__init__.py,sha256=JaVte0j9Gqi6IrjZKudJ2Kr3gkdHsvlCdRTo-zYpSRo,11815
 torchx/components/component_test_base.py,sha256=22iNSdVa_qTW3SMM30Pw5UEWlK4DZVw0C03EqYiaLOI,4150
-torchx/components/dist.py,sha256=xmWf7nNuadcwPkbNHowd2bgiaPyZ9QDHv_5tSov11N0,14593
+torchx/components/dist.py,sha256=6DNPEvHVqEifmM8g1L7HVY169cQv_7tSfSlh3o6lTp4,14930
 torchx/components/interpret.py,sha256=g8gkKdDJvsBfX1ZrpVT7n2bMEtmwRV_1AqDyAnnQ_aA,697
 torchx/components/metrics.py,sha256=1gbp8BfzZWGa7PD1db5vRADlONzmae4qSBUUdCWayr0,2814
 torchx/components/serve.py,sha256=uxIC5gU2ecg0EJIPX_oEPzNNOXRAre4j2eXusrgwGAI,2156
@@ -48,7 +48,7 @@ torchx/examples/apps/lightning/profiler.py,sha256=SSSihnwjeUTkBoz0E3qn1b-wbkfUIo
 torchx/examples/apps/lightning/train.py,sha256=0wvvshGHvZowePB4LfclXwn40X7i9euM0ReETWBcPSo,6253
 torchx/pipelines/__init__.py,sha256=2MbRVk5xwRjg-d2qPemeXpEhDsocMQumPQ53lsesZAI,606
 torchx/runner/__init__.py,sha256=x8Sz7s_tLxPgJgvWIhK4ju9BNZU61uBFywGwDY6CqJs,315
-torchx/runner/api.py,sha256=0kDyOgmAcq0X-bTWiBIqX3BJOCYSa5-TZ7o2Hrqxzdw,30053
+torchx/runner/api.py,sha256=KydGtgeSwJwbqcDnI5fZ0MCuijDlzfeoSCRiDef8RCE,31294
 torchx/runner/config.py,sha256=SaKOB50d79WaMFPWK8CC4as6UaNFaRGhrBkfajq3KC4,18311
 torchx/runner/events/__init__.py,sha256=cMiNjnr4eUNQ2Nxxtu4nsvN5lu56b-a6nJ-ct3i7DQk,5536
 torchx/runner/events/api.py,sha256=bvxKBAYK8LzbrBNaNLgL1x0aivtfANmWo1EMGOrSR8k,2668
@@ -56,7 +56,7 @@ torchx/runner/events/handlers.py,sha256=ThHCIJW21BfBgB7b6ftyjASJmD1KdizpjuTtsyqn
 torchx/runtime/__init__.py,sha256=Wxje2BryzeQneFu5r6P9JJiEKG-_C9W1CcZ_JNrKT6g,593
 torchx/runtime/tracking/__init__.py,sha256=dYnAPnrXYREfPXkpHhdOFkcYIODWEbA13PdD-wLQYBo,3055
 torchx/runtime/tracking/api.py,sha256=SmUQyUKZqG3KlAhT7CJOGqRz1O274E4m63wQeOVq3CU,5472
-torchx/schedulers/__init__.py,sha256=_Wx6-X3FNh8RJR82UGgUwKg7V_VQYsAkrveDoSSk2xU,2195
+torchx/schedulers/__init__.py,sha256=FQN9boQM4mwOD3sK9LZ3GBgw-gJ7Vx4MFj6z6ATQIrc,2211
 torchx/schedulers/api.py,sha256=lfxNhrEO6eYYqVuQzzj9sTXrZShuZkyYxJ1jPE-Lvpo,14561
 torchx/schedulers/aws_batch_scheduler.py,sha256=-HpjNVhSFBDxZo3cebK-3YEguB49dxoaud2gz30cAVM,29437
 torchx/schedulers/aws_sagemaker_scheduler.py,sha256=flN8GumKE2Dz4X_foAt6Jnvt-ZVojWs6pcyrHwB0hz0,20921
@@ -69,8 +69,8 @@ torchx/schedulers/local_scheduler.py,sha256=ttnxFDy48_DSYDEW-no27OirFZOyfrjwJ2S1
 torchx/schedulers/lsf_scheduler.py,sha256=YS6Yel8tXJqLPxbcGz95lZG2nCi36AQXdNDyuBJePKg,17661
 torchx/schedulers/slurm_scheduler.py,sha256=vypGaCZe61bkyNkqRlK4Iwmk_NaAUQi-DsspaWd6BZw,31873
 torchx/schedulers/streams.py,sha256=8_SLezgnWgfv_zXUsJCUM34-h2dtv25NmZuxEwkzmxw,2007
-torchx/specs/__init__.py,sha256=RNjj4cV64AXP-2XowHLJJpzub1zYuyS17-2SU-dCcN0,6632
-torchx/specs/api.py,sha256=ZJEqBnEFG2jMMfQuIrBFHiX-Thr_wz2mAMiYeGf-fWo,42311
+torchx/specs/__init__.py,sha256=XpyR3PPcv5IwZg5iX18KDoRhDFqUoQm7o4ANo6lOo78,6683
+torchx/specs/api.py,sha256=Rm0G5wR4j2OnHj-IrU9wLw-AwnNtvkElTNUDvy2brcE,45413
 torchx/specs/builders.py,sha256=Ye3of4MupJ-da8vLaX6_-nzGo_FRw1BFpYsX6dAZCNk,13730
 torchx/specs/file_linter.py,sha256=z0c4mKJv47BWiPaWCdUM0A8kHwnj4b1s7oTmESuD9Tc,14407
 torchx/specs/finder.py,sha256=gWQNEFrLYqrZoI0gMMhQ70YAC4sxqS0ZFpoWAmcVi44,17438
@@ -90,7 +90,7 @@ torchx/tracker/backend/fsspec.py,sha256=528xKryBE27Rm_OHD7r2R6fmVAclknBtoy1s034N
 torchx/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 torchx/util/cuda.py,sha256=-ZTa1WCLnY2WtSWAdWufLQqZSDCZfZsloBuiS84LIkU,1099
 torchx/util/datetime.py,sha256=hV6Sg0u5KTBe68yrmy_RGCC5su0i4Tb_mAYphWamiXI,405
-torchx/util/entrypoints.py,sha256=XgwCjQ5f1xchUVxABiPqODgd3--SrOtUTlgtMlAeKKc,3980
+torchx/util/entrypoints.py,sha256=YUv7F-Vr4uuY4_82IBPdrz5vrch_qsx_dIr6e08kSD4,3800
 torchx/util/io.py,sha256=HNpWLcFUX0WTAP3CsdamHz--FR5A4kSdLCPfNqa2UkA,1807
 torchx/util/log_tee_helpers.py,sha256=wPyozmh9BOt_2d3Gxa0iNogwnjzwFitIIMBJOJ1arIw,6330
 torchx/util/modules.py,sha256=o4y_d07gTpJ4nIVBcoUVJ0JtXIHEsEC5kbgBM6NGpgA,2135
@@ -99,12 +99,12 @@ torchx/util/shlex.py,sha256=eXEKu8KC3zIcd8tEy9_s8Ds5oma8BORr-0VGWNpG2dk,463
 torchx/util/strings.py,sha256=7Ef1loz2IYMrzeJ6Lewywi5cBIc3X3g7lSPbT1Tn_z4,664
 torchx/util/types.py,sha256=E9dxAWQnsJkIDuHtg-poeOJ4etucSI_xP_Z5kNJX8uI,9229
 torchx/workspace/__init__.py,sha256=cZsKVvUWwDYcGhe6SCXQGBQfbk_yTnKEImOkI6xmu30,809
-torchx/workspace/api.py,sha256=MGBQauBoH7wZdvXHXOx7JqefCF41rK0AHWF68IUwr4k,11276
+torchx/workspace/api.py,sha256=h2SaC-pYPBLuo3XtkXJ0APMoro-C-ry7KucI7r3EUf4,8753
 torchx/workspace/dir_workspace.py,sha256=npNW_IjUZm_yS5r-8hrRkH46ndDd9a_eApT64m1S1T4,2268
 torchx/workspace/docker_workspace.py,sha256=PFu2KQNVC-0p2aKJ-W_BKA9ZOmXdCY2ABEkCExp3udQ,10269
-torchx_nightly-2025.10.5.dist-info/LICENSE,sha256=WVHfXhFC0Ia8LTKt_nJVYobdqTJVg_4J3Crrfm2A8KQ,1721
-torchx_nightly-2025.10.5.dist-info/METADATA,sha256=RS0F_3CwozkGMCoK12JV22O1vzLGptlzkoDYiXEU3uE,5068
-torchx_nightly-2025.10.5.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
-torchx_nightly-2025.10.5.dist-info/entry_points.txt,sha256=T328AMXeKI3JZnnxfkEew2ZcMN1oQDtkXjMz7lkV-P4,169
-torchx_nightly-2025.10.5.dist-info/top_level.txt,sha256=pxew3bc2gsiViS0zADs0jb6kC5v8o_Yy_85fhHj_J1A,7
-torchx_nightly-2025.10.5.dist-info/RECORD,,
+torchx_nightly-2025.10.8.dist-info/LICENSE,sha256=WVHfXhFC0Ia8LTKt_nJVYobdqTJVg_4J3Crrfm2A8KQ,1721
+torchx_nightly-2025.10.8.dist-info/METADATA,sha256=IK3YoeN_3TfpZ6MgFJknUbYqDV6oSaMO-PkXIehOEOE,5068
+torchx_nightly-2025.10.8.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+torchx_nightly-2025.10.8.dist-info/entry_points.txt,sha256=T328AMXeKI3JZnnxfkEew2ZcMN1oQDtkXjMz7lkV-P4,169
+torchx_nightly-2025.10.8.dist-info/top_level.txt,sha256=pxew3bc2gsiViS0zADs0jb6kC5v8o_Yy_85fhHj_J1A,7
+torchx_nightly-2025.10.8.dist-info/RECORD,,