scientiflow-cli 0.4.6__tar.gz → 0.4.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/PKG-INFO +1 -1
  2. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/pyproject.toml +4 -1
  3. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/main.py +9 -2
  4. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/pipeline/decode_and_execute.py +84 -54
  5. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/services/executor.py +9 -46
  6. scientiflow_cli-0.4.8/scientiflow_cli/services/modes.py +45 -0
  7. scientiflow_cli-0.4.8/scientiflow_cli/services/request_handler.py +44 -0
  8. scientiflow_cli-0.4.8/scientiflow_cli/utils/config.py +9 -0
  9. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/utils/file_manager.py +5 -2
  10. scientiflow_cli-0.4.6/scientiflow_cli/config.py +0 -9
  11. scientiflow_cli-0.4.6/scientiflow_cli/services/request_handler.py +0 -31
  12. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/LICENSE.md +0 -0
  13. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/README.md +0 -0
  14. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/__init__.py +0 -0
  15. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/__main__.py +0 -0
  16. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/cli/__init__.py +0 -0
  17. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/cli/auth_utils.py +0 -0
  18. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/cli/login.py +0 -0
  19. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/cli/logout.py +0 -0
  20. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/pipeline/__init__.py +0 -0
  21. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/pipeline/container_manager.py +0 -0
  22. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/pipeline/get_jobs.py +0 -0
  23. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/services/__init__.py +0 -0
  24. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/services/auth_service.py +0 -0
  25. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/services/base_directory.py +0 -0
  26. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/services/rich_printer.py +0 -0
  27. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/services/status_updater.py +0 -0
  28. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/utils/__init__.py +0 -0
  29. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/utils/encryption.py +0 -0
  30. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/utils/logger.py +0 -0
  31. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/utils/mock.py +0 -0
  32. {scientiflow_cli-0.4.6 → scientiflow_cli-0.4.8}/scientiflow_cli/utils/singularity.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: scientiflow-cli
-Version: 0.4.6
+Version: 0.4.8
 Summary: CLI tool for scientiflow. This application runs on the client side, decodes pipelines, and executes them in the configured order!
 License: Proprietary
 Author: ScientiFlow
@@ -1,6 +1,9 @@
+[tool.scientiflow]
+mode = "prod"
+
 [tool.poetry]
 name = "scientiflow-cli"
-version = "0.4.6"
+version = "0.4.8"
 description = "CLI tool for scientiflow. This application runs on the client side, decodes pipelines, and executes them in the configured order!"
 authors = ["ScientiFlow <scientiflow@gmail.com>"]
 license = "Proprietary"
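
The new [tool.scientiflow] table pins a default mode of "prod" in the package metadata, while the runtime mode switch introduced in this release is persisted separately in ~/.scientiflow/mode (see services/modes.py below). The diff shows no code that reads this TOML key; if a consumer were wanted, a minimal sketch using the stdlib tomllib (Python 3.11+; read_packaged_mode is a hypothetical helper, not part of the package) could look like:

    import tomllib  # stdlib TOML parser, Python 3.11+

    def read_packaged_mode(path: str = "pyproject.toml") -> str:
        # Hypothetical helper: read the [tool.scientiflow] mode key, defaulting to "prod".
        with open(path, "rb") as f:  # tomllib requires a binary file handle
            data = tomllib.load(f)
        return data.get("tool", {}).get("scientiflow", {}).get("mode", "prod")
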
@@ -7,10 +7,11 @@ from importlib.metadata import PackageNotFoundError
 from scientiflow_cli.cli.login import login_user
 from scientiflow_cli.cli.logout import logout_user
 from scientiflow_cli.pipeline.get_jobs import get_jobs
-from scientiflow_cli.services.executor import execute_jobs, execute_jobs_sync, execute_job_id
+from scientiflow_cli.services.executor import execute_jobs
 from scientiflow_cli.services.base_directory import set_base_directory, get_base_directory
 from scientiflow_cli.utils.singularity import install_singularity_main as install_singularity
 from scientiflow_cli.pipeline.container_manager import manage_containers
+from scientiflow_cli.services.modes import set_mode
 from scientiflow_cli.services.rich_printer import RichPrinter
 from art import text2art
 
@@ -82,6 +83,8 @@ def main():
     parser.add_argument('--execute-jobs', nargs='*', type=int, help="Execute jobs. Specify job IDs as arguments \n(e.g., --execute-jobs jobID1 jobID2 ...) or leave empty to execute all jobs.\nUse -p or --parallel flag to execute jobs in parallel.")
     parser.add_argument('--hostname', type=str, help=argparse.SUPPRESS) # Hide --hostname from --help
     parser.add_argument('--token', type=str, help=argparse.SUPPRESS) # Hide --token from --help
+    parser.add_argument('--set-mode', type=str, choices=["dev", "prod"], help=argparse.SUPPRESS) # Hide --set-mode from --help
+    parser.add_argument('--cloud-job', action='store_true', help=argparse.SUPPRESS) # Hide --cloud-job from --help
 
     # Parse arguments
     args, unknown_args = parser.parse_known_args()
@@ -120,7 +123,7 @@
     elif args.execute_jobs is not None:
         # Use the parsed `parallel` flag
         job_ids = args.execute_jobs if args.execute_jobs else None
-        execute_jobs(job_ids=job_ids, parallel=args.parallel)
+        execute_jobs(job_ids=job_ids, parallel=args.parallel, is_cloud=args.cloud_job)
     elif args.install_singularity:
         install_singularity(enable_gpu=args.enable_gpu)
     elif args.manage_containers:
@@ -128,6 +131,10 @@
         if not base_dir:
             sys.exit(2)
         manage_containers(base_dir=base_dir)
+    elif args.set_mode:
+        logout_user()
+        set_mode(args.set_mode)
+        sys.exit(0)
     else:
         display_title()
         printer.print_message("No arguments specified. Use --help to see available options", style="bold red")
@@ -12,6 +12,85 @@ from scientiflow_cli.services.rich_printer import RichPrinter
 
 printer = RichPrinter()
 
+# Global background job tracker
+class GlobalBackgroundJobTracker:
+    _instance = None
+    _lock = threading.Lock()
+
+    def __new__(cls):
+        if cls._instance is None:
+            with cls._lock:
+                if cls._instance is None:
+                    cls._instance = super().__new__(cls)
+                    cls._instance._initialize()
+        return cls._instance
+
+    def _initialize(self):
+        self.background_executors = []
+        self.background_jobs_count = 0
+        self.background_jobs_completed = 0
+        self.background_jobs_lock = threading.Lock()
+
+    def register_background_job(self, executor, futures, node_label, log_file_path):
+        """Register a background job for global tracking."""
+        with self.background_jobs_lock:
+            self.background_jobs_count += 1
+            self.background_executors.append(executor)
+
+        # Start monitoring in a separate thread
+        monitor_thread = threading.Thread(
+            target=self._monitor_job,
+            args=(futures, node_label, executor, log_file_path),
+            daemon=True
+        )
+        monitor_thread.start()
+
+    def _monitor_job(self, futures, node_label, executor, log_file_path):
+        """Monitor background job completion."""
+        all_successful = True
+        for future in as_completed(futures):
+            success = future.result()
+            if not success:
+                all_successful = False
+
+        if not all_successful:
+            with open(log_file_path, 'a') as f:
+                f.write(f"[ERROR] Background job {node_label} failed\n")
+            printer.print_message(f"[BACKGROUND JOB] {node_label} Failed - some commands in background job failed", style="bold red")
+        else:
+            printer.print_message(f"[BACKGROUND JOB] {node_label} Execution completed in the background", style="bold green")
+
+        # Clean up executor
+        executor.shutdown(wait=False)
+        with self.background_jobs_lock:
+            if executor in self.background_executors:
+                self.background_executors.remove(executor)
+            self.background_jobs_completed += 1
+
+    def wait_for_all_jobs(self):
+        """Wait for all background jobs to complete."""
+        import time
+        if self.background_jobs_count > 0:
+            printer.print_message(f"[INFO] Waiting for {self.background_jobs_count} background job(s) to complete...", style="bold yellow")
+
+            while True:
+                with self.background_jobs_lock:
+                    if self.background_jobs_completed >= self.background_jobs_count:
+                        break
+                time.sleep(0.5) # Check every 500ms
+
+            printer.print_message("[INFO] All background jobs completed.", style="bold green")
+
+    def reset(self):
+        """Reset the tracker for a new execution cycle."""
+        with self.background_jobs_lock:
+            self.background_executors = []
+            self.background_jobs_count = 0
+            self.background_jobs_completed = 0
+
+# Global tracker instance
+global_bg_tracker = GlobalBackgroundJobTracker()
+
 def execute_background_command_standalone(command: str, log_file_path: str):
     """Execute a command in background without real-time output display - standalone function for multiprocessing."""
     try:
@@ -51,10 +130,6 @@
         self.current_node = None
         self.job_status = job_status
         self.current_node_from_config = current_node_from_config
-        self.background_executors = [] # Keep track of background executors
-        self.background_jobs_count = 0 # Track number of active background jobs
-        self.background_jobs_completed = 0 # Track completed background jobs
-        self.background_jobs_lock = threading.Lock() # Thread-safe counter updates
 
         # For resuming: flag to track if we've reached the resume point
         self.resume_mode = (job_status == "running" and current_node_from_config is not None)
@@ -165,47 +240,7 @@
             self.update_terminal_output()
             raise SystemExit("[ERROR] Pipeline execution terminated due to an unexpected error.")
 
-    def monitor_background_job(self, futures, node_label, executor):
-        """Monitor background job completion in a separate thread."""
-        def monitor():
-            all_successful = True
-            for future in as_completed(futures):
-                success = future.result()
-                if not success:
-                    all_successful = False
-
-            if not all_successful:
-                self.log_output(f"[ERROR] Background job {node_label} failed")
-                printer.print_message(f"[BACKGROUND JOB] {node_label} Failed - some commands in background job failed", style="bold red")
-            else:
-                printer.print_message(f"[BACKGROUND JOB] {node_label} Execution completed in the background", style="bold green")
-
-            # Clean up executor
-            executor.shutdown(wait=False)
-            if executor in self.background_executors:
-                self.background_executors.remove(executor)
-
-            # Update background job completion count
-            with self.background_jobs_lock:
-                self.background_jobs_completed += 1
-
-        # Start monitoring thread
-        monitor_thread = threading.Thread(target=monitor, daemon=True)
-        monitor_thread.start()
-
-    def wait_for_background_jobs(self):
-        """Wait for all background jobs to complete."""
-        import time
-        if self.background_jobs_count > 0:
-            printer.print_message(f"[INFO] Waiting for {self.background_jobs_count} background job(s) to complete...", style="bold yellow")
-
-            while True:
-                with self.background_jobs_lock:
-                    if self.background_jobs_completed >= self.background_jobs_count:
-                        break
-                time.sleep(0.5) # Check every 500ms
-
-            printer.print_message("[INFO] All background jobs completed.", style="bold green")
 
+
     def dfs(self, node: str):
         """Perform Depth-First Search (DFS) for executing pipeline nodes."""
@@ -284,19 +319,14 @@
 
         # Execute commands in background using ProcessPoolExecutor (non-blocking)
        if command_list:
-            # Increment background jobs counter
-            with self.background_jobs_lock:
-                self.background_jobs_count += 1
-
             executor = ProcessPoolExecutor(max_workers=numberOfThreads)
-            self.background_executors.append(executor) # Keep reference to prevent garbage collection
             futures = []
             for cmd in command_list:
                 future = executor.submit(execute_background_command_standalone, cmd, self.log_file_path)
                 futures.append(future)
 
-            # Start monitoring in a separate thread (non-blocking)
-            self.monitor_background_job(futures, node_label, executor)
+            # Register with global tracker (non-blocking)
+            global_bg_tracker.register_background_job(executor, futures, node_label, self.log_file_path)
 
             # Don't wait for completion, immediately continue to next node
         else:
@@ -344,9 +374,9 @@
         if starting_node:
             self.dfs(starting_node)
 
-        # Wait for all background jobs to complete before marking pipeline as completed
-        self.wait_for_background_jobs()
-
+        # Don't wait for background jobs here - let them continue across multiple jobs
+        # Background jobs will be waited for at the end of all job executions
+
         update_job_status(self.project_job_id, "completed")
         update_stopped_at_node(self.project_id, self.project_job_id, self.current_node)
@@ -1,7 +1,7 @@
 from concurrent.futures import ThreadPoolExecutor
 import asyncio
 from scientiflow_cli.pipeline.get_jobs import get_jobs
-from scientiflow_cli.pipeline.decode_and_execute import decode_and_execute_pipeline
+from scientiflow_cli.pipeline.decode_and_execute import decode_and_execute_pipeline, global_bg_tracker
 from scientiflow_cli.pipeline.container_manager import get_job_containers
 from scientiflow_cli.utils.file_manager import create_job_dirs, get_job_files
 from scientiflow_cli.services.rich_printer import RichPrinter
@@ -21,7 +21,7 @@ def get_all_pending_jobs() -> list[dict]:
     return []
 
 
-def execute_jobs(job_ids: list[int] = None, parallel: bool = False) -> None:
+def execute_jobs(job_ids: list[int] = None, parallel: bool = False, is_cloud: bool = False) -> None:
     """
     Execute jobs based on the provided job IDs. If no job IDs are provided, execute all pending jobs.
     If `parallel` is True, execute jobs asynchronously.
@@ -50,26 +50,8 @@ def execute_jobs(job_ids: list[int] = None, parallel: bool = False) -> None:
     else:
         # Execute jobs synchronously
         for job in jobs_to_execute:
-            execute_single_job(job)
-
-
-def execute_jobs_sync(job_ids: list[int] = None) -> None:
-    """
-    Execute all jobs synchronously and in order
-    """
-
-    all_pending_jobs: list[dict] = []
-    all_pending_jobs = get_all_pending_jobs()
-    all_pending_jobs = sort_jobs_by_id(all_pending_jobs)
-
-    job_dict: dict[int, dict] = store_jobs_in_dict(all_pending_jobs)
-
-    for job_id in job_ids:
-        if job_id not in job_dict:
-            printer.print_error(f"No job found with ID: {job_id}")
-            continue
-        execute_single_job(job_dict[job_id])
-
+            execute_single_job(job, is_cloud)
+
 
 def sort_jobs_by_id(all_pending_jobs: list[dict]) -> list[dict]:
     """
@@ -97,28 +79,6 @@ def store_jobs_in_dict(all_pending_jobs: list[dict]) -> dict:
 
     return job_dict
 
-
-def execute_job_id(job_id: int) -> None:
-    """
-    Execute job with the given job_id
-    """
-
-    # Retrieve all jobs using 'get_jobs'
-    all_pending_jobs: list[dict] = []
-
-    all_pending_jobs = get_all_pending_jobs()
-
-    # Store jobs in order of their job_id
-    job_dict: dict[int, dict] = store_jobs_in_dict(all_pending_jobs)
-
-    if job_id not in job_dict:
-        printer.print_error(f"No job found with ID: {job_id}")
-        return
-
-    execute_single_job(job_dict[job_id])
-
-
-
 async def execute_async(jobs: list[dict]) -> None:
     """Execute jobs asynchronously."""
     running_jobs = []
@@ -150,9 +110,12 @@
 
     await asyncio.gather(*running_jobs) # Wait for all jobs to complete
     printer.print_success("[ASYNC COMPLETE] All jobs finished!")
+
+    # Wait for all background jobs from all executed jobs to complete
+    global_bg_tracker.wait_for_all_jobs()
 
 
-def execute_single_job(job: dict) -> None:
+def execute_single_job(job: dict, is_cloud: bool = False) -> None:
     """Function to decode and execute a job."""
     try:
         # Validate the job dictionary
@@ -189,7 +152,7 @@
         if job_status != "running":
             if job["new_job"] == 1:
                 # Initialize folders for the project / project_job
-                create_job_dirs(job)
+                create_job_dirs(job, is_cloud)
 
                 # Fetch the files and folder from the backend
                 get_job_files(job)
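
executor.py shrinks: execute_jobs_sync and execute_job_id are removed, and both use cases are now covered by execute_jobs, which takes optional job IDs, a parallel flag, and the new is_cloud flag; the async path additionally drains the global tracker after asyncio.gather. A usage sketch (IDs are placeholders):

    execute_jobs()                               # all pending jobs, sequentially
    execute_jobs(job_ids=[7, 9], parallel=True)  # selected jobs, asynchronously
    execute_jobs(job_ids=[7], is_cloud=True)     # cloud job: changes directory naming
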
@@ -0,0 +1,45 @@
+from pathlib import Path
+from scientiflow_cli.services.rich_printer import RichPrinter
+import os
+
+printer = RichPrinter()
+
+# Use ~/.scientiflow/mode for mode configuration (plain text file)
+MODE_FILE = os.path.expanduser("~/.scientiflow/mode")
+MODE_DIR = os.path.dirname(MODE_FILE)
+
+def set_mode(mode: str):
+    mode = mode.lower()
+    if mode not in {"dev", "prod"}:
+        raise ValueError("Mode must be either 'dev' or 'prod'")
+
+    # Ensure directory exists
+    os.makedirs(MODE_DIR, exist_ok=True)
+
+    # Write mode to plain text file
+    with open(MODE_FILE, "w") as file:
+        file.write(mode)
+
+    printer.print_message(
+        f"ScientiFlow mode set to '{mode}'",
+        style="bold green"
+    )
+
+
+def get_mode() -> str:
+    # Create mode file with default "prod" if it doesn't exist
+    if not os.path.exists(MODE_FILE):
+        os.makedirs(MODE_DIR, exist_ok=True)
+        with open(MODE_FILE, "w") as file:
+            file.write("prod")
+        return "prod"
+
+    # Read mode from file
+    try:
+        with open(MODE_FILE, "r") as file:
+            mode = file.read().strip().lower()
+        if mode in {"dev", "prod"}:
+            return mode
+        return "prod"
+    except Exception:
+        return "prod"
@@ -0,0 +1,44 @@
+import requests
+from scientiflow_cli.utils.config import get_app_base_url
+from scientiflow_cli.cli.auth_utils import getAuthToken
+
+
+def handle_response(response, error_message):
+    return response
+
+def make_auth_request(endpoint, method, data=None, params=None, error_message=None):
+    headers = {'Authorization': f'Bearer {getAuthToken()}'}
+    base_url = get_app_base_url()
+
+    try:
+        if method == 'GET':
+            response = requests.get(base_url + endpoint, headers=headers, params=params)
+        elif method == 'POST':
+            response = requests.post(base_url + endpoint, json=data, headers=headers)
+        else:
+            raise ValueError("Unsupported HTTP method")
+
+        return handle_response(response, error_message)
+
+    except requests.RequestException as e:
+        print(e)
+        return "Request failed"
+
+
+def make_no_auth_request(endpoint, method, data=None, error_message=None):
+    base_url = get_app_base_url()
+
+    try:
+        if method == 'GET':
+            response = requests.get(base_url + endpoint)
+        elif method == 'POST':
+            response = requests.post(base_url + endpoint, json=data)
+            print(response)
+        else:
+            raise ValueError("Unsupported HTTP method")
+
+        return handle_response(response, error_message)
+
+    except requests.RequestException as e:
+        print(e)
+        return "Request failed"
@@ -0,0 +1,9 @@
+from scientiflow_cli.services.modes import get_mode
+
+
+def get_app_base_url() -> str:
+    mode = get_mode()
+    if mode == "dev":
+        return "https://www.scientiflow-backend-dev.scientiflow.com/api"
+
+    return "https://www.backend.scientiflow.com/api"
@@ -37,9 +37,12 @@ def get_job_files(job: dict) -> None:
 
     printer.print_success(f"[+] Files extracted to {str(project_dir_name).strip()}")
 
-def create_job_dirs(job: dict) -> None:
+def create_job_dirs(job: dict, is_cloud: bool = False) -> None:
     base_dir = Path(job['server']['base_directory'])
-    project_dir = base_dir / job['project']['project_title']
+    if is_cloud:
+        project_dir = base_dir / (job['project']['id']+"_"+job['project']['project_title'])
+    else:
+        project_dir = base_dir / job['project']['project_title']
     job_dir = project_dir / job['project_job']['job_directory']
     job_dir.mkdir(parents=True, exist_ok=True)
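
For cloud jobs, the project directory is now prefixed with the project ID so that identically titled projects cannot collide. Note the concatenation assumes job['project']['id'] is already a string; an integer ID from the backend would raise TypeError and would need a str() coercion. A sketch of the resulting layout (values are placeholders):

    job = {
        'server': {'base_directory': '/data'},
        'project': {'id': '42', 'project_title': 'protein-folding'},
        'project_job': {'job_directory': 'run-001'},
    }
    # is_cloud=True  -> /data/42_protein-folding/run-001
    # is_cloud=False -> /data/protein-folding/run-001
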
@@ -1,9 +0,0 @@
-import os
-
-class Config:
-    mode="prod"
-    if mode=="prod":
-        APP_BASE_URL = os.getenv("APP_BASE_URL", "https://www.backend.scientiflow.com/api")
-    elif mode=="dev":
-        APP_BASE_URL = "http://127.0.0.1:8000/api"
-        # APP_BASE_URL = os.getenv("APP_BASE_URL", "https://www.scientiflow-backend-dev.scientiflow.com/api")
@@ -1,31 +0,0 @@
-import requests
-from scientiflow_cli.config import Config
-from scientiflow_cli.cli.auth_utils import getAuthToken
-
-app_base_url = Config.APP_BASE_URL
-
-
-def handle_response(response, error_message):
-    return response
-
-def make_auth_request(endpoint,method,data=None,params=None,error_message=None):
-    headers = {'Authorization': f'Bearer {getAuthToken()}'}
-    try:
-        if method == 'GET':
-            response = requests.get(app_base_url+endpoint,headers=headers,params=params)
-        elif method == 'POST':
-            response = requests.post(app_base_url+endpoint, json=data,headers=headers)
-        return handle_response(response, error_message)
-    except requests.RequestException as e:
-        return "Unsupported HTTP method"
-
-def make_no_auth_request(endpoint,method,data=None,error_message=None):
-    try:
-        if method == 'GET':
-            response = requests.get(app_base_url+endpoint)
-        elif method == 'POST':
-            response = requests.post(app_base_url+endpoint, json=data)
-        return handle_response(response, error_message)
-    except requests.RequestException as e:
-        return "Unsupported HTTP method"
-