pipen-cli-gbatch 0.0.6__tar.gz → 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pipen-cli-gbatch might be problematic. See the registry's advisory page for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pipen-cli-gbatch
3
- Version: 0.0.6
3
+ Version: 0.1.0
4
4
  Summary: A pipen cli plugin to run command via Google Cloud Batch
5
5
  License: MIT
6
6
  Author: pwwang
@@ -79,7 +79,7 @@ from pipen.cli import CLIPlugin
79
79
  from pipen.scheduler import GbatchScheduler
80
80
  from pipen_poplog import LogsPopulator
81
81
 
82
- __version__ = "0.0.6"
82
+ __version__ = "0.1.0"
83
83
  __all__ = ("CliGbatchPlugin", "CliGbatchDaemon")
84
84
  MOUNTED_CWD = "/mnt/disks/.cwd"
85
85
 
@@ -135,6 +135,7 @@ class CliGbatchDaemon:
135
135
  for key, val in (item.split("=", 1) for item in self.config.labels)
136
136
  }
137
137
  self.command = command
138
+ self._command_workdir = None
138
139
 
139
140
  def _get_arg_from_command(self, arg: str) -> str | None:
140
141
  """Get the value of the given argument from the command line.
@@ -218,8 +219,10 @@ class CliGbatchDaemon:
218
219
  if from_mount_as_cwd:
219
220
  self.config.workdir = f"{self.mount_as_cwd}/.pipen/{command_name}"
220
221
 
221
- command_workdir = self._get_arg_from_command("workdir")
222
- workdir = self.config.get("workdir", None) or command_workdir
222
+ # self._command_workdir to save the original command workdir
223
+ self._command_workdir = workdir = (
224
+ self.config.get("workdir", None) or self._get_arg_from_command("workdir")
225
+ )
223
226
 
224
227
  if not workdir or not isinstance(AnyPath(workdir), GSPath):
225
228
  print(
@@ -247,8 +250,16 @@ class CliGbatchDaemon:
247
250
  command_outdir = self._get_arg_from_command("outdir")
248
251
 
249
252
  if command_outdir:
250
- self._add_mount(command_outdir, GbatchScheduler.MOUNTED_OUTDIR)
251
- self._replace_arg_in_command("outdir", GbatchScheduler.MOUNTED_OUTDIR)
253
+ coudir = AnyPath(command_outdir)
254
+ if (
255
+ not isinstance(coudir, GSPath)
256
+ and not coudir.is_absolute()
257
+ and self.mount_as_cwd
258
+ ):
259
+ self._replace_arg_in_command("outdir", f"{MOUNTED_CWD}/{coudir}")
260
+ else:
261
+ self._add_mount(command_outdir, GbatchScheduler.MOUNTED_OUTDIR)
262
+ self._replace_arg_in_command("outdir", GbatchScheduler.MOUNTED_OUTDIR)
252
263
  elif self.mount_as_cwd:
253
264
  command_name = self._get_arg_from_command("name") or self.config.name
254
265
  self._replace_arg_in_command(
@@ -304,7 +315,13 @@ class CliGbatchDaemon:
304
315
  and not self.config.view_logs
305
316
  and "logging" not in plugin.get_all_plugin_names()
306
317
  ):
307
- plugins.append(XquteCliGbatchPlugin())
318
+ if self.config.plain:
319
+ # use the stdout file from daemon
320
+ stdout_file = None
321
+ else:
322
+ stdout_file = AnyPath(f"{self._command_workdir}/run-latest.log")
323
+
324
+ plugins.append(XquteCliGbatchPlugin(stdout_file=stdout_file))
308
325
 
309
326
  return Xqute(
310
327
  "gbatch",
@@ -516,6 +533,12 @@ class CliGbatchDaemon:
516
533
  self._handle_outdir()
517
534
  self._infer_jobname_prefix()
518
535
  else:
536
+ if "name" not in self.config or not self.config.name:
537
+ self.config["name"] = "PipenCliGbatchDaemon"
538
+
539
+ if not self.config.workdir and self.mount_as_cwd:
540
+ self.config.workdir = f"{self.mount_as_cwd}/.pipen"
541
+
519
542
  if not self.config.workdir or not isinstance(
520
543
  AnyPath(self.config.workdir),
521
544
  GSPath,
@@ -526,9 +549,6 @@ class CliGbatchDaemon:
526
549
  )
527
550
  sys.exit(1)
528
551
 
529
- if "name" not in self.config or not self.config.name:
530
- self.config["name"] = "PipenCliGbatchDaemon"
531
-
532
552
  async def run(self): # pragma: no cover
533
553
  """Execute the daemon pipeline based on configuration.
534
554
 
@@ -560,12 +580,15 @@ class XquteCliGbatchPlugin: # pragma: no cover
560
580
 
561
581
  Attributes:
562
582
  name (str): The plugin name.
563
- log_start (bool): Whether to start logging when job starts.
564
583
  stdout_populator (LogsPopulator): Handles stdout log population.
565
584
  stderr_populator (LogsPopulator): Handles stderr log population.
566
585
  """
567
586
 
568
- def __init__(self, name: str = "logging", log_start: bool = True):
587
+ def __init__(
588
+ self,
589
+ name: str = "logging",
590
+ stdout_file: str | Path | GSPath | None = None,
591
+ ):
569
592
  """Initialize the logging plugin.
570
593
 
571
594
  Args:
@@ -573,7 +596,7 @@ class XquteCliGbatchPlugin: # pragma: no cover
573
596
  log_start: Whether to start logging when job starts.
574
597
  """
575
598
  self.name = name
576
- self.log_start = log_start
599
+ self.stdout_file = stdout_file
577
600
  self.stdout_populator = LogsPopulator()
578
601
  self.stderr_populator = LogsPopulator()
579
602
 
@@ -586,17 +609,6 @@ class XquteCliGbatchPlugin: # pragma: no cover
586
609
  logger.error(f"/STDERR {self.stderr_populator.residue}")
587
610
  self.stderr_populator.residue = ""
588
611
 
589
- @plugin.impl
590
- async def on_job_init(self, scheduler, job):
591
- """Handle job initialization event.
592
-
593
- Args:
594
- scheduler: The scheduler instance.
595
- job: The job being initialized.
596
- """
597
- self.stdout_populator.logfile = scheduler.workdir.joinpath("0", "job.stdout")
598
- self.stderr_populator.logfile = scheduler.workdir.joinpath("0", "job.stderr")
599
-
600
612
  @plugin.impl
601
613
  async def on_job_started(self, scheduler, job):
602
614
  """Handle job start event by setting up log file paths.
@@ -605,10 +617,31 @@ class XquteCliGbatchPlugin: # pragma: no cover
605
617
  scheduler: The scheduler instance.
606
618
  job: The job that started.
607
619
  """
608
- if not self.log_start:
609
- return
610
-
611
620
  logger.info("Job is picked up by Google Batch, pulling stdout/stderr...")
621
+ if not self.stdout_file:
622
+ self.stdout_populator.logfile = scheduler.workdir.joinpath(
623
+ "0", "job.stdout"
624
+ )
625
+ elif not self.stdout_file.exists():
626
+ await asyncio.sleep(3) # wait a bit for the file to be created
627
+ if not self.stdout_file.exists():
628
+ logger.warning(f"Running logs not found: {self.stdout_file}")
629
+ logger.warning(
630
+ "Make sure pipen-log2file plugin is enabled for your pipeline."
631
+ )
632
+ logger.warning("Falling back to pull logs from daemon...")
633
+ self.stdout_populator.logfile = scheduler.workdir.joinpath(
634
+ "0", "job.stdout"
635
+ )
636
+ else:
637
+ self.stdout_populator.logfile = (
638
+ self.stdout_file.resolve()
639
+ if self.stdout_file.is_symlink()
640
+ else self.stdout_file
641
+ )
642
+ else:
643
+ self.stdout_populator.logfile = self.stdout_file
644
+ self.stderr_populator.logfile = scheduler.workdir.joinpath("0", "job.stderr")
612
645
 
613
646
  @plugin.impl
614
647
  async def on_job_polling(self, scheduler, job, counter):
@@ -809,11 +842,7 @@ class CliGbatchPlugin(CLIPlugin): # pragma: no cover
809
842
 
810
843
  # update parsed with the defaults
811
844
  for key, val in defaults.items():
812
- if (
813
- key == "mount"
814
- and val
815
- and getattr(known_parsed, key, None)
816
- ):
845
+ if key == "mount" and val and getattr(known_parsed, key, None):
817
846
  if not isinstance(val, (tuple, list)):
818
847
  val = [val]
819
848
  val = list(val)
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "pipen-cli-gbatch"
3
- version = "0.0.6"
3
+ version = "0.1.0"
4
4
  description = "A pipen cli plugin to run command via Google Cloud Batch"
5
5
  authors = ["pwwang <pwwang@pwwang.com>"]
6
6
  license = "MIT"
@@ -17,7 +17,7 @@ entry_points = \
17
17
 
18
18
  setup_kwargs = {
19
19
  'name': 'pipen-cli-gbatch',
20
- 'version': '0.0.6',
20
+ 'version': '0.1.0',
21
21
  'description': 'A pipen cli plugin to run command via Google Cloud Batch',
22
22
  'long_description': '# pipen-cli-gbatch\n\nA pipen CLI plugin to run commands via Google Cloud Batch.\n\nThe idea is to submit the command using xqute and use the gbatch scheduler to run it on Google Cloud Batch.\n\n## Installation\n\n```bash\npip install pipen-cli-gbatch\n```\n\n## Usage\n\n### Basic Command Execution\n\nTo run a command like:\n\n```bash\npython myscript.py --input input.txt --output output.txt\n```\n\nYou can run it with:\n\n```bash\npipen gbatch -- python myscript.py --input input.txt --output output.txt\n```\n\n### With Configuration File\n\nIn order to provide configurations like we do for a normal pipen pipeline, you can also provide a config file (the `[pipen-cli-gbatch]` section will be used):\n\n```bash\npipen gbatch @config.toml -- \\\n python myscript.py --input input.txt --output output.txt\n```\n\n### Detached Mode\n\nWe can also use the `--nowait` option to run the command in a detached mode:\n\n```bash\npipen gbatch --nowait -- \\\n python myscript.py --input input.txt --output output.txt\n```\n\nOr by default, it will wait for the command to complete:\n\n```bash\npipen gbatch -- \\\n python myscript.py --input input.txt --output output.txt\n```\n\nWhile waiting, the running logs will be pulled and shown in the terminal.\n\n### View Logs\n\nWhen running in detached mode, one can also pull the logs later by:\n\n```bash\npipen gbatch --view-logs -- \\\n python myscript.py --input input.txt --output output.txt\n\n# or just provide the workdir\npipen gbatch --view-logs --workdir gs://my-bucket/workdir\n```\n\n## Configuration\n\nBecause the daemon pipeline is running on Google Cloud Batch, a Google Storage Bucket path is required for the workdir. 
For example: `gs://my-bucket/workdir`\n\nA unique job ID will be generated per the name (`--name`) and workdir, so that if the same command is run again with the same name and workdir, it will not start a new job, but just attach to the existing job and pull the logs.\n\nIf `--name` is not provided in the command line, it will try to grab the name (`--name`) from the command line arguments after `--`, or else use "name" from the root section of the configuration file, with a "GbatchDaemon" suffix. If nothing can be found, a default name "PipenGbatchDaemon" will be used.\n\nThen a workdir `{workdir}/<daemon pipeline name>/` will be created to store the meta information.\n\nWith `--profile` provided, the scheduler options (`scheduler_opts`) defined in `~/.pipen.toml` and `./.pipen.toml` will be used as default.\n\n## All Options\n\n```bash\n> pipen gbatch --help\nUsage: pipen gbatch [-h] [--nowait | --view-logs {all,stdout,stderr}] [--workdir WORKDIR]\n [--error-strategy {retry,halt}] [--num-retries NUM_RETRIES] [--prescript PRESCRIPT]\n [--postscript POSTSCRIPT] [--jobname-prefix JOBNAME_PREFIX] [--recheck-interval RECHECK_INTERVAL]\n [--cwd CWD] [--project PROJECT] [--location LOCATION] [--mount MOUNT]\n [--service-account SERVICE_ACCOUNT] [--network NETWORK] [--subnetwork SUBNETWORK]\n [--no-external-ip-address] [--machine-type MACHINE_TYPE] [--provisioning-model {STANDARD,SPOT}]\n [--image-uri IMAGE_URI] [--entrypoint ENTRYPOINT] [--commands COMMANDS] [--runnables RUNNABLES]\n [--allocationPolicy ALLOCATIONPOLICY] [--taskGroups TASKGROUPS] [--labels LABELS] [--gcloud GCLOUD]\n [--name NAME] [--profile PROFILE] [--version]\n [--loglevel {DEBUG,INFO,WARNING,ERROR,CRITICAL,debug,info,warning,error,critical}]\n ...\n\nSimplify running commands via Google Cloud Batch.\n\nKey Options:\n The key options to run the command.\n\n --workdir WORKDIR The workdir (a Google Storage Bucket path is required) to store the meta information of the\n daemon pipeline.\n If not 
provided, the one from the command will be used.\n command The command passed after `--` to run, with all its arguments. Note that the command should be\n provided after `--`.\n\nScheduler Options:\n The options to configure the gbatch scheduler.\n\n --error-strategy {retry,halt}\n The strategy when there is error happened [default: halt]\n --num-retries NUM_RETRIES\n The number of retries when there is error happened. Only valid when --error-strategy is \'retry\'.\n [default: 0]\n --prescript PRESCRIPT\n The prescript to run before the main command.\n --postscript POSTSCRIPT\n The postscript to run after the main command.\n --jobname-prefix JOBNAME_PREFIX\n The prefix of the name prefix of the daemon job.\n If not provided, try to generate one from the command to run.\n If the command is also not provided, use \'pipen-gbatch-daemon\' as the prefix.\n --recheck-interval RECHECK_INTERVAL\n The interval to recheck the job status, each takes about 0.1 seconds. [default: 600]\n --cwd CWD The working directory to run the command. If not provided, the current directory is used. You\n can pass either a mounted path (inside the VM) or a Google Storage Bucket path (gs://...). If a\n Google Storage Bucket path is provided, the mounted path will be inferred from the mounted paths\n of the VM.\n --project PROJECT The Google Cloud project to run the job.\n --location LOCATION The location to run the job.\n --mount MOUNT The list of mounts to mount to the VM, each in the format of SOURCE:TARGET, where SOURCE must be\n either a Google Storage Bucket path (gs://...). 
[default: []]\n --service-account SERVICE_ACCOUNT\n The service account to run the job.\n --network NETWORK The network to run the job.\n --subnetwork SUBNETWORK\n The subnetwork to run the job.\n --no-external-ip-address\n Whether to disable external IP address for the VM.\n --machine-type MACHINE_TYPE\n The machine type of the VM.\n --provisioning-model {STANDARD,SPOT}\n The provisioning model of the VM.\n --image-uri IMAGE_URI\n The custom image URI of the VM.\n --entrypoint ENTRYPOINT\n The entry point of the container to run the command.\n --commands COMMANDS The list of commands to run in the container, each as a separate string. [default: []]\n --runnables RUNNABLES\n The JSON string of extra settings of runnables add to the job.json.\n Refer to https://cloud.google.com/batch/docs/reference/rest/v1/projects.locations.jobs#Runnable\n for details.\n You can have an extra key \'order\' for each runnable, where negative values mean to run before\n the main command,\n and positive values mean to run after the main command.\n --allocationPolicy ALLOCATIONPOLICY\n The JSON string of extra settings of allocationPolicy add to the job.json. Refer to\n https://cloud.google.com/batch/docs/reference/rest/v1/projects.locations.jobs#AllocationPolicy\n for details. [default: {}]\n --taskGroups TASKGROUPS\n The JSON string of extra settings of taskGroups add to the job.json. Refer to\n https://cloud.google.com/batch/docs/reference/rest/v1/projects.locations.jobs#TaskGroup for\n details. [default: []]\n --labels LABELS The JSON string of labels to add to the job. Refer to\n https://cloud.google.com/batch/docs/reference/rest/v1/projects.locations.jobs#Job.FIELDS.labels\n for details. [default: {}]\n --gcloud GCLOUD The path to the gcloud command. [default: gcloud]\n\nOptions:\n -h, --help show this help message and exit\n --nowait Run the command in a detached mode without waiting for its completion. 
[default: False]\n --view-logs {all,stdout,stderr}\n View the logs of a job.\n --name NAME The name of the daemon pipeline.\n If not provided, try to generate one from the command to run.\n If the command is also not provided, use \'PipenCliGbatchDaemon\' as the name.\n --profile PROFILE Use the `scheduler_opts` as the Scheduler Options of a given profile from pipen configuration\n files,\n including ~/.pipen.toml and ./pipen.toml.\n Note that if not provided, nothing will be loaded from the configuration files.\n --version Show the version of the pipen-cli-gbatch package. [default: False]\n --loglevel {DEBUG,INFO,WARNING,ERROR,CRITICAL,debug,info,warning,error,critical}\n Set the logging level for the daemon process. [default: INFO]\n\nExamples:\n \u200b\n # Run a command and wait for it to complete\n > pipen gbatch --workdir gs://my-bucket/workdir -- \\\n python myscript.py --input input.txt --output output.txt\n\n # Use named mounts\n > pipen gbatch --workdir gs://my-bucket/workdir --mount INFILE=gs://bucket/path/to/file \\\n --mount OUTDIR=gs://bucket/path/to/outdir -- \\\n cat $INFILE > $OUTDIR/output.txt\n \u200b\n # Run a command in a detached mode\n > pipen gbatch --nowait --project $PROJECT --location $LOCATION \\\n --workdir gs://my-bucket/workdir -- \\\n python myscript.py --input input.txt --output output.txt\n \u200b\n # If you have a profile defined in ~/.pipen.toml or ./.pipen.toml\n > pipen gbatch --profile myprofile -- \\\n python myscript.py --input input.txt --output output.txt\n \u200b\n # View the logs of a previously run command\n > pipen gbatch --view-logs all --name my-daemon-name \\\n --workdir gs://my-bucket/workdir\n```\n\n## API\n\nThe API can also be used to run commands programmatically:\n\n```python\nimport asyncio\nfrom pipen_cli_gbatch import CliGbatchDaemon\n\npipe = CliGbatchDaemon(config_for_daemon, command)\nasyncio.run(pipe.run())\n```\n\nNote that the daemon pipeline will always be running without caching, so that the command 
will always be executed when the pipeline is run.\n',
23
23
  'author': 'pwwang',