cloudsim 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63) hide show
  1. cloudsim/.env.example +14 -0
  2. cloudsim/__init__.py +1 -0
  3. cloudsim/app.py +26 -0
  4. cloudsim/cli.py +10 -0
  5. cloudsim/cli_services/__init__.py +0 -0
  6. cloudsim/cli_services/common.py +74 -0
  7. cloudsim/cli_services/handlers/__init__.py +0 -0
  8. cloudsim/cli_services/handlers/autoscale.py +286 -0
  9. cloudsim/cli_services/handlers/config.py +174 -0
  10. cloudsim/cli_services/handlers/data.py +308 -0
  11. cloudsim/cli_services/handlers/general.py +29 -0
  12. cloudsim/cli_services/handlers/lb.py +66 -0
  13. cloudsim/cli_services/handlers/network.py +130 -0
  14. cloudsim/cli_services/handlers/vm.py +184 -0
  15. cloudsim/cli_services/main.py +10 -0
  16. cloudsim/cli_services/parser.py +210 -0
  17. cloudsim/cli_services/runtime.py +52 -0
  18. cloudsim/database.py +302 -0
  19. cloudsim/docs/README_BACKEND.md +137 -0
  20. cloudsim/docs/README_FRONTEND.md +31 -0
  21. cloudsim/docs/README_ROOT.md +68 -0
  22. cloudsim/docs/SIMULATION_GUIDE.md +164 -0
  23. cloudsim/docs/WALKTHROUGH.md +106 -0
  24. cloudsim/docs/course/module-0-setup.md +44 -0
  25. cloudsim/docs/course/module-1-compute.md +46 -0
  26. cloudsim/docs/course/module-2-database.md +44 -0
  27. cloudsim/docs/course/module-3-loadbalancer.md +50 -0
  28. cloudsim/docs/course/module-4-networking.md +54 -0
  29. cloudsim/paths.py +86 -0
  30. cloudsim/scripts/math_service.py +112 -0
  31. cloudsim/scripts/sample_app.py +53 -0
  32. cloudsim/services/__init__.py +0 -0
  33. cloudsim/services/autoscaler_service.py +51 -0
  34. cloudsim/services/cloud_armor_service.py +37 -0
  35. cloudsim/services/constants.py +56 -0
  36. cloudsim/services/dns_resolution_service.py +36 -0
  37. cloudsim/services/errors.py +8 -0
  38. cloudsim/services/load_balancer_service.py +59 -0
  39. cloudsim/services/mongo_service.py +47 -0
  40. cloudsim/services/postgres_service.py +240 -0
  41. cloudsim/services/real_postgres_service.py +182 -0
  42. cloudsim/services/request_queue_service.py +25 -0
  43. cloudsim/services/routing_service.py +13 -0
  44. cloudsim/services/script_runtime_service.py +19 -0
  45. cloudsim/services/sqlite_utils.py +26 -0
  46. cloudsim/services/time_utils.py +7 -0
  47. cloudsim/services/vm_sdk_source.py +4 -0
  48. cloudsim/services/vm_service.py +61 -0
  49. cloudsim/services/vm_sqlite_service.py +58 -0
  50. cloudsim/simulator.py +707 -0
  51. cloudsim/web/__init__.py +0 -0
  52. cloudsim/web/app_factory.py +43 -0
  53. cloudsim/web/deps.py +24 -0
  54. cloudsim/web/env_loader.py +28 -0
  55. cloudsim/web/error_handlers.py +23 -0
  56. cloudsim/web/middleware.py +129 -0
  57. cloudsim/web/routes_api.py +192 -0
  58. cloudsim-1.0.0.dist-info/METADATA +248 -0
  59. cloudsim-1.0.0.dist-info/RECORD +63 -0
  60. cloudsim-1.0.0.dist-info/WHEEL +5 -0
  61. cloudsim-1.0.0.dist-info/entry_points.txt +3 -0
  62. cloudsim-1.0.0.dist-info/licenses/LICENSE +21 -0
  63. cloudsim-1.0.0.dist-info/top_level.txt +1 -0
cloudsim/.env.example ADDED
@@ -0,0 +1,14 @@
1
+ # Example environment for CloudSim
2
+
3
+ # Where CloudSim writes runtime state (VM folders and SQLite state).
4
+ # Recommended: set an absolute path.
5
+ # Windows example: E:\cloudsim-workspace
6
+ # macOS/Linux example: /home/you/cloudsim-workspace
7
+ CLOUDSIM_WORKSPACE=cloudsim-workspace
8
+
9
+ # Optional: real PostgreSQL admin connection (for real Cloud SQL simulation)
10
+ CLOUDSIM_PG_HOST=127.0.0.1
11
+ CLOUDSIM_PG_PORT=5432
12
+ CLOUDSIM_PG_ADMIN_USER=postgres
13
+ CLOUDSIM_PG_ADMIN_PASSWORD=pg123
14
+ CLOUDSIM_PG_ADMIN_DB=postgres
cloudsim/__init__.py ADDED
@@ -0,0 +1 @@
1
+ """Nimbus Cloud Simulator backend package."""
cloudsim/app.py ADDED
@@ -0,0 +1,26 @@
1
import argparse
import sys
from cloudsim.web.app_factory import create_app

# Module-level WSGI application (importable by WSGI servers as well).
app = create_app()


def main(argv: list[str] | None = None) -> int:
    """CLI entry point: parse server options and run the Flask dev server.

    Returns 0 on normal shutdown.
    """
    cli_args = sys.argv[1:] if argv is None else argv

    parser = argparse.ArgumentParser(prog="cloudsim-server", description="Start the CloudSim API server.")
    parser.add_argument("--host", default="127.0.0.1", help="The interface to bind to (default: 127.0.0.1)")
    parser.add_argument("--port", type=int, default=5000, help="The port to bind to (default: 5000)")
    parser.add_argument("--debug", action="store_true", help="Enable Flask debug mode")
    options = parser.parse_args(cli_args)

    print(f"[*] Starting CloudSim Server on {options.host}:{options.port} (debug={options.debug})")
    app.run(host=options.host, port=options.port, debug=options.debug)
    return 0


if __name__ == "__main__":
    sys.exit(main())
cloudsim/cli.py ADDED
@@ -0,0 +1,10 @@
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+
5
+ from cloudsim.cli_services.main import main
6
+
7
+
8
+ if __name__ == "__main__":
9
+ raise SystemExit(main(sys.argv[1:]))
10
+
cloudsim/cli_services/common.py ADDED
@@ -0,0 +1,74 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import os
5
+ import urllib.error
6
+ import urllib.request
7
+ from pathlib import Path
8
+ from typing import Any
9
+
10
+ from cloudsim import paths
11
+
12
+ DEFAULT_API_URL = os.environ.get("CLOUDSIM_API_URL", "http://127.0.0.1:5000").rstrip("/")
13
+
14
+
15
def get_repo_root() -> Path:
    """Return the CloudSim workspace root directory.

    Retained under its legacy name because the CLI handlers still import it;
    it simply delegates to ``cloudsim.paths.workspace_root``.
    """
    root = paths.workspace_root()
    return root
18
+
19
+
20
class CliError(Exception):
    """Failure raised by CLI helpers for user-facing errors.

    Carries the display message plus a status code (a process exit code, or
    the HTTP status when the failure came from the backend).
    """

    def __init__(self, message: str, status_code: int = 1) -> None:
        # Populate Exception.args so str(error) renders the message.
        super().__init__(message)
        self.message = message
        self.status_code = status_code
25
+
26
+
27
+ def _parse_json(value: str, label: str) -> Any:
28
+ try:
29
+ return json.loads(value)
30
+ except json.JSONDecodeError as error:
31
+ raise CliError(f"Invalid JSON for {label}: {error.msg}") from error
32
+
33
+
34
def _request(
    base_url: str,
    method: str,
    path: str,
    payload: dict[str, Any] | None = None,
    timeout: float = 15,
) -> dict[str, Any]:
    """Send a JSON HTTP request to the backend and return the decoded body.

    Args:
        base_url: Backend base URL, no trailing slash.
        method: HTTP verb (GET/POST/DELETE/...).
        path: Request path appended to *base_url*.
        payload: Optional JSON body.
        timeout: Socket timeout in seconds (default 15, as before).

    Raises:
        CliError: on HTTP errors (status code preserved), unreachable backend,
            a non-JSON response, or a response with ``"ok": false``.
    """
    url = f"{base_url}{path}"
    headers = {"Content-Type": "application/json"}
    body = None if payload is None else json.dumps(payload).encode("utf-8")
    req = urllib.request.Request(url=url, method=method, data=body, headers=headers)
    try:
        with urllib.request.urlopen(req, timeout=timeout) as response:
            raw = response.read().decode("utf-8")
            data = json.loads(raw) if raw else {}
    except urllib.error.HTTPError as error:
        raw = error.read().decode("utf-8")
        message = f"HTTP {error.code}"
        if raw:
            try:
                error_data = json.loads(raw)
            except json.JSONDecodeError:
                message = f"{message}: {raw}"
            else:
                # The error body may be any JSON value; only dicts carry the
                # "error"/"message" fields (previously a non-dict body crashed
                # with AttributeError on .get()).
                if isinstance(error_data, dict):
                    message = error_data.get("error") or error_data.get("message") or message
        # Chain the cause, consistent with the other raises below.
        raise CliError(message, status_code=error.code) from error
    except urllib.error.URLError as error:
        raise CliError(f"Unable to reach backend at {base_url}: {error}") from error
    except json.JSONDecodeError as error:
        raise CliError(f"Invalid JSON response from backend: {error}") from error

    # The backend signals application-level failure with {"ok": false, ...}.
    if isinstance(data, dict) and data.get("ok") is False:
        raise CliError(str(data.get("error") or "Request failed"))
    return data
61
+
62
+
63
+ def _print(data: Any, key: str | None = None) -> None:
64
+ if key and isinstance(data, dict):
65
+ if "state" in data:
66
+ data = data["state"]
67
+ if key in data:
68
+ target = data[key]
69
+ if isinstance(target, list) and target:
70
+ print(json.dumps(target[0], indent=2, sort_keys=True))
71
+ else:
72
+ print(json.dumps(target, indent=2, sort_keys=True))
73
+ return
74
+ print(json.dumps(data, indent=2, sort_keys=True))
cloudsim/cli_services/handlers/autoscale.py ADDED
@@ -0,0 +1,286 @@
1
+ from __future__ import annotations
2
+
3
+ import argparse
4
+ import itertools
5
+ import time
6
+ from typing import Any, Callable
7
+
8
+ from ..common import CliError, _request
9
+
10
+
11
def _prompt_yes_no(question: str, default_yes: bool = True) -> bool:
    """Ask a yes/no question on stdin; an empty answer returns *default_yes*."""
    if default_yes:
        suffix = " [Y/n]: "
    else:
        suffix = " [y/N]: "
    answer = input(question + suffix).strip().lower()
    if answer:
        return answer in {"y", "yes"}
    return default_yes
17
+
18
+
19
+ def _find_running_vm_count_for_lb(state: dict[str, Any], lb_id: str) -> int:
20
+ lb = next((item for item in state.get("lbs", []) if item.get("id") == lb_id), None)
21
+ if not lb:
22
+ return 0
23
+ vm_ids = set(lb.get("backendVmIds", []))
24
+ return sum(1 for vm in state.get("vms", []) if vm.get("id") in vm_ids and vm.get("status") == "running")
25
+
26
+
27
+ def _find_running_autoscaled_count_for_lb(state: dict[str, Any], lb_id: str) -> int:
28
+ lb = next((item for item in state.get("lbs", []) if item.get("id") == lb_id), None)
29
+ if not lb:
30
+ return 0
31
+ vm_ids = set(lb.get("backendVmIds", []))
32
+ return sum(
33
+ 1
34
+ for vm in state.get("vms", [])
35
+ if vm.get("id") in vm_ids
36
+ and vm.get("status") == "running"
37
+ and vm.get("autoScaled")
38
+ and vm.get("autoScaledForLbId") == lb_id
39
+ )
40
+
41
+
42
def _wait_for_state(
    args: argparse.Namespace,
    condition: Callable[[dict[str, Any]], bool],
    label: str,
    timeout_seconds: float = 20.0,
    poll_seconds: float = 0.5,
) -> dict[str, Any]:
    """Poll /api/state until *condition* holds, showing a spinner labelled *label*.

    Args:
        args: Parsed CLI args; only ``args.base_url`` is used.
        condition: Predicate evaluated against the ``state`` dict each poll.
        label: Human-readable description printed next to the spinner.
        timeout_seconds: Total time to wait before giving up.
        poll_seconds: Delay between polls.

    Returns:
        The first state dict for which *condition* is true.

    Raises:
        CliError: if the condition is not met before the deadline.
    """
    spinner = itertools.cycle(["|", "/", "-", "\\"])
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        response = _request(args.base_url, "GET", "/api/state")
        state = response.get("state", {})
        if condition(state):
            print(f"\r{label} ... done ")
            return state
        print(f"\r{label} ... {next(spinner)}", end="", flush=True)
        time.sleep(poll_seconds)
    # Terminate the spinner line before reporting the timeout.
    print("")
    raise CliError(f"Timeout while waiting: {label}")
63
+
64
+
65
def _animate_autoscale_spinup(
    args: argparse.Namespace,
    lb_id: str,
    autoscaled_now: int,
    target_total: int,
    step_delay_seconds: float,
) -> dict[str, Any]:
    """Animate a progress bar while auto-scaled VMs for *lb_id* come online.

    Polls /api/state, announces each backend instance of the LB as it turns
    "running", and staggers the expected auto-scaled count by
    *step_delay_seconds* so the progress display ramps up gradually.

    Args:
        args: Parsed CLI args; only ``args.base_url`` is used.
        lb_id: Load balancer whose backends are being watched.
        autoscaled_now: Number of auto-scaled instances the backend reported.
        target_total: Total running backends expected (base + auto-scaled).
        step_delay_seconds: Pause between successive expected-count increments.

    Returns:
        The state dict observed once the fleet is stable.

    Raises:
        CliError: if the fleet does not stabilize before the deadline.
    """
    spinner = itertools.cycle(["|", "/", "-", "\\"])
    # Allow at least 30s, scaled up with the number of instances to wait for.
    deadline = time.time() + max(30.0, autoscaled_now * (step_delay_seconds + 3.0))
    next_stage_at = time.time() + 0.1
    required_autoscaled = 0
    announced_vms: set[str] = set()

    print("\n[*] COMPUTE ENGINE: Signal received. Initializing lifecycle events...")

    while time.time() < deadline:
        response = _request(args.base_url, "GET", "/api/state")
        state = response.get("state", {})

        lb = next((item for item in state.get("lbs", []) if item.get("id") == lb_id), None)
        lb_vm_ids = set(lb.get("backendVmIds", [])) if lb else set()

        running_total = _find_running_vm_count_for_lb(state, lb_id)
        running_autoscaled = _find_running_autoscaled_count_for_lb(state, lb_id)

        # Notify when an instance becomes ready (each instance announced once).
        for vm in state.get("vms", []):
            v_id = vm.get("id")
            if not v_id:
                continue
            if v_id in lb_vm_ids and vm.get("status") == "running" and v_id not in announced_vms:
                announced_vms.add(v_id)
                prefix = "[AUTO]" if vm.get("autoScaled") else "[BASE]"
                print(f"\r {prefix} Instance {v_id} is healthy. (port {vm.get('reservedPort')}) ")

        # Ramp the expected auto-scaled count one step at a time.
        now = time.time()
        if autoscaled_now > 0 and now >= next_stage_at and required_autoscaled < autoscaled_now:
            required_autoscaled += 1
            next_stage_at = now + step_delay_seconds

        stage_met = running_autoscaled >= required_autoscaled
        total_met = running_total >= target_total

        progress = (running_total / target_total) if target_total > 0 else 0
        bar_len = 20
        filled = int(progress * bar_len)
        bar = "#" * filled + "." * (bar_len - filled)

        status_text = (
            f"\r{next(spinner)} [{bar}] Scaling compute footprint... "
            f"({running_total}/{target_total} active) "
        )
        print(status_text, end="", flush=True)

        if stage_met and total_met and required_autoscaled >= autoscaled_now:
            print(f"\r[OK] [{bar}] Fleet stabilization complete. ({target_total}/{target_total} active) \n")
            return state
        time.sleep(0.15)

    # Terminate the progress line before reporting the timeout.
    print("")
    raise CliError("Fleet took too long to stabilize.")
128
+
129
+
130
def _handle_autoscale_test(args: argparse.Namespace) -> None:
    """Run the interactive autoscale lab scenario end to end.

    Provisions a base VM and an HTTP(S) load balancer, fires a burst of
    simulated traffic at the LB, animates the autoscaler spin-up, prints a
    per-instance traffic distribution report, then optionally tears down the
    auto-scaled VMs and finally the temporary LB and base VM.

    Uses args.base_url, args.requests, args.max_instances and
    args.spinup_delay. May raise CliError on backend errors or timeouts.
    """
    print("\n" + "=" * 60)
    print("CLOUDSIM AUTOSCALE LABORATORY".center(60))
    print("=" * 60)
    print("This scenario simulates a high-traffic event to trigger autoscaling.")

    if not _prompt_yes_no("Launch infrastructure simulation?", default_yes=True):
        print("Cancelled.")
        return

    # Pull server-side constants/profiles so the scenario matches the backend.
    state = _request(args.base_url, "GET", "/api/state").get("state", {})
    constants = state.get("constants", {})
    backend_profiles = state.get("backendProfiles", [])
    if not backend_profiles:
        raise CliError("No backend profiles available from server state.")

    # Fall back to hard-coded defaults only if the server omits the lists.
    region = constants.get("vmRegions", ["us-central1"])[0]
    machine = constants.get("vmMachineTypes", ["e2-standard-2"])[0]
    image = constants.get("vmImages", ["Ubuntu 22.04 LTS"])[0]
    backend_profile_id = backend_profiles[0].get("id", "python-fastapi")
    endpoint_key = "GET /health"

    # --- Step 1: provision the base VM -----------------------------------
    print(f"\n[*] Provisioning static resources in {region}...")
    vm_name = f"as-base-{int(time.time())}"  # timestamped, unique-enough name
    vm_payload = {
        "name": vm_name,
        "region": region,
        "machineType": machine,
        "image": image,
        "backendProfileId": backend_profile_id,
        "runtimeMode": "profile",
        "scriptPath": "",
    }
    vm_resp = _request(args.base_url, "POST", "/api/vms", vm_payload)
    # NOTE(review): assumes the newly-created VM is first in state["vms"] —
    # relies on backend ordering.
    base_vm_id = vm_resp["state"]["vms"][0]["id"]

    _wait_for_state(
        args,
        lambda s: any(vm.get("id") == base_vm_id and vm.get("status") == "running" for vm in s.get("vms", [])),
        "Wait: Base VM boot",
    )

    # --- Step 2: create the load balancer in front of the base VM --------
    lb_name = f"as-lb-{int(time.time())}"
    lb_payload = {
        "name": lb_name,
        "type": "HTTP(S)",
        "region": constants.get("lbRegions", ["global"])[0],
        "distributionAlgorithmId": "round_robin",
        "backendVmIds": [base_vm_id],
        "autoScaleProfileId": "aggressive",
        "queueServiceTier": "disabled",
        "cloudArmorPolicyId": "relaxed",
    }
    lb_resp = _request(args.base_url, "POST", "/api/load-balancers", lb_payload)
    lb_id = lb_resp["state"]["lbs"][0]["id"]

    _wait_for_state(
        args,
        lambda s: any(lb.get("id") == lb_id and lb.get("status") == "active" for lb in s.get("lbs", [])),
        "Wait: LB Propagation",
    )

    print(f"\n[!] READY: Load Balancer {lb_id} is live with 1 backend.")
    if not _prompt_yes_no("Initiate traffic spike?", default_yes=True):
        print("Test aborted.")
        return

    # --- Step 3: simulate the traffic burst ------------------------------
    print(f"\n[*] BURST: Sending {args.requests} requests to {lb_id}...")
    simulate_payload = {
        "lbId": lb_id,
        "endpointKey": endpoint_key,
        "requests": args.requests,
        "clientId": "autoscale-lab-client",
    }
    sim_resp = _request(args.base_url, "POST", "/api/simulate", simulate_payload)
    traffic = sim_resp.get("state", {}).get("lastTrafficResult", {}) or {}
    autoscaled_now = int(traffic.get("autoscaledCount", 0))
    # Total expected = base VM + autoscaled, capped at --max-instances.
    target_total = min(args.max_instances, 1 + max(0, autoscaled_now))

    print(f"[*] RESPONSE: Autoscaler detected overload. Triggering +{autoscaled_now} instances.")

    # --- Step 4: animate spin-up until the fleet is stable ---------------
    state = _animate_autoscale_spinup(
        args=args,
        lb_id=lb_id,
        autoscaled_now=autoscaled_now,
        target_total=target_total,
        step_delay_seconds=args.spinup_delay,
    )

    # --- Step 5: print the per-instance traffic distribution report ------
    print("=" * 60)
    print("TRAFFIC DISTRIBUTION ANALYSIS".center(60))
    print("-" * 60)
    vm_usage = traffic.get("vmUsage", {})
    total_served = int(traffic.get("served", 0) or 0)
    print(f" Total Requests Processed: {total_served}")
    print(f" Average Latency: {traffic.get('avgLatencyMs')}ms")
    print(f" Distribution Algorithm: {traffic.get('distributionAlgorithmId')}")
    print("\n INSTANCE RELAY MAP:")

    lb = next((item for item in state.get("lbs", []) if item.get("id") == lb_id), None)
    vm_ids = lb.get("backendVmIds", []) if lb else []

    for v_id in vm_ids:
        vm = next((v for v in state.get("vms", []) if v.get("id") == v_id), None)
        if not vm:
            continue
        count = int(vm_usage.get(v_id, 0) or 0)
        share = (count / total_served * 100) if total_served > 0 else 0
        tag = "[AUTO]" if vm.get("autoScaled") else "[BASE]"
        status_mark = "*" if vm.get("status") == "running" else "!"
        bar = "#" * int(share / 5)  # 5% of traffic per bar character
        print(f" {status_mark} {v_id} {tag:<6} | {count:4} reqs ({share:4.1f}%) {bar}")
    print("=" * 60 + "\n")

    # --- Step 6: optional teardown of auto-scaled instances --------------
    if _prompt_yes_no("Destroy ephemeral resources?", default_yes=True):
        current_state = _request(args.base_url, "GET", "/api/state").get("state", {})
        lb_current = next((item for item in current_state.get("lbs", []) if item.get("id") == lb_id), None)
        lb_vm_ids = set(lb_current.get("backendVmIds", [])) if lb_current else set()
        autoscaled = [
            vm
            for vm in current_state.get("vms", [])
            if vm.get("id") in lb_vm_ids
            and vm.get("autoScaled")
            and vm.get("autoScaledForLbId") == lb_id
        ]
        autoscaled_ids = [vm.get("id") for vm in autoscaled if vm.get("id")]
        for vm_id in autoscaled_ids:
            _request(args.base_url, "DELETE", f"/api/vms/{vm_id}")

        # Deletes may lag; re-check once and retry any survivors.
        time.sleep(0.4)
        verify_state = _request(args.base_url, "GET", "/api/state").get("state", {})
        remaining_ids = {
            vm.get("id")
            for vm in verify_state.get("vms", [])
            if vm.get("autoScaled") and vm.get("autoScaledForLbId") == lb_id
        }
        retry_ids = [vm_id for vm_id in autoscaled_ids if vm_id in remaining_ids]
        for vm_id in retry_ids:
            _request(args.base_url, "DELETE", f"/api/vms/{vm_id}")

        # Final accounting: report what was actually destroyed.
        final_state = _request(args.base_url, "GET", "/api/state").get("state", {})
        final_remaining = [
            vm.get("id")
            for vm in final_state.get("vms", [])
            if vm.get("autoScaled") and vm.get("autoScaledForLbId") == lb_id
        ]
        destroyed_count = len(autoscaled_ids) - len(final_remaining)
        print(f"Destroyed {destroyed_count} auto-scaled VM(s).")
        if final_remaining:
            print(f"Warning: still present auto-scaled VM(s): {', '.join(final_remaining)}")
    else:
        print("Kept auto-scaled VM(s) running.")

    # --- Step 7: optional teardown of the LB and base VM -----------------
    if _prompt_yes_no("Destroy temporary LB and base VM too?", default_yes=False):
        _request(args.base_url, "DELETE", f"/api/load-balancers/{lb_id}")
        _request(args.base_url, "DELETE", f"/api/vms/{base_vm_id}")
        print("Destroyed temporary LB and base VM.")
cloudsim/cli_services/handlers/config.py ADDED
@@ -0,0 +1,174 @@
1
+ from __future__ import annotations
2
+
3
+ import argparse
4
+ import os
5
+ from pathlib import Path
6
+
7
+ import cloudsim
8
+ from cloudsim.paths import (
9
+ autoscale_vms_dir,
10
+ ensure_state_dir,
11
+ read_global_config,
12
+ vms_dir,
13
+ workspace_root,
14
+ write_global_config,
15
+ )
16
+ from cloudsim.web.env_loader import load_env_file
17
+
18
+ from ..common import CliError, _print
19
+
20
+
21
def _package_env_example_path() -> Path:
    """Locate the .env.example file shipped inside the installed cloudsim package."""
    package_dir = Path(cloudsim.__file__).resolve().parent
    return package_dir / ".env.example"
23
+
24
+
25
+ def _render_env_from_example(example_text: str, workspace: Path) -> str:
26
+ lines = example_text.splitlines()
27
+
28
+ def is_ws_line(line: str) -> bool:
29
+ return line.strip().startswith("CLOUDSIM_WORKSPACE=")
30
+
31
+ out: list[str] = []
32
+ replaced = False
33
+ for line in lines:
34
+ if is_ws_line(line):
35
+ out.append(f"CLOUDSIM_WORKSPACE={workspace}")
36
+ replaced = True
37
+ else:
38
+ out.append(line)
39
+
40
+ if not replaced:
41
+ out.append(f"CLOUDSIM_WORKSPACE={workspace}")
42
+
43
+ return "\n".join(out).rstrip() + "\n"
44
+
45
+
46
def _handle_config_init(args: argparse.Namespace) -> None:
    """Initialize a CloudSim workspace: create directories and write .env.

    Resolves the workspace from ``args.workspace`` (falling back to the current
    default from ``workspace_root()``), creates the expected directory layout,
    renders .env from the packaged .env.example, loads it into this process,
    and — unless ``args.no_default`` — persists the chosen workspace in the
    global config. Prints a JSON summary including shell-specific hints for
    exporting CLOUDSIM_WORKSPACE.

    Raises:
        CliError: if .env already exists without ``--force``, or the packaged
            .env.example is missing.
    """
    ws = (args.workspace or "").strip()
    if ws:
        workspace = Path(ws).expanduser().resolve()
    else:
        workspace = workspace_root()

    # Ensure this process uses the chosen workspace immediately (even if user didn't export env yet).
    os.environ["CLOUDSIM_WORKSPACE"] = str(workspace)

    workspace.mkdir(parents=True, exist_ok=True)

    # Create expected top-level dirs.
    ensure_state_dir()
    vms_dir().mkdir(parents=True, exist_ok=True)
    autoscale_vms_dir().mkdir(parents=True, exist_ok=True)

    env_path = workspace / ".env"
    # Refuse to clobber an existing .env unless the user asked for it.
    if env_path.exists() and not args.force:
        raise CliError(f"{env_path} already exists. Use --force to overwrite.")

    example_path = _package_env_example_path()
    if not example_path.exists():
        raise CliError(f"Missing packaged env example at {example_path}")

    rendered = _render_env_from_example(example_path.read_text(encoding="utf-8"), workspace)
    env_path.write_text(rendered, encoding="utf-8")

    # Load it for this process so immediate follow-up commands work.
    load_env_file(env_path)

    default_cfg_path = ""
    # Record the workspace in the global config unless --no-default was given.
    if not getattr(args, "no_default", False):
        cfg = read_global_config()
        if not isinstance(cfg, dict):
            cfg = {}
        cfg["workspace"] = str(workspace)
        default_cfg_path = str(write_global_config(cfg))

    _print(
        {
            "ok": True,
            "workspace": str(workspace),
            "envFile": str(env_path),
            "defaultConfig": default_cfg_path,
            "next": {
                "windowsPowerShellSession": f"$env:CLOUDSIM_WORKSPACE = '{workspace}'",
                "windowsPermanentUser": f'setx CLOUDSIM_WORKSPACE "{workspace}"',
                "macLinuxSession": f'export CLOUDSIM_WORKSPACE="{workspace}"',
            },
        }
    )
+
99
+
100
+ def _mask(value: str) -> str:
101
+ if not value:
102
+ return ""
103
+ if len(value) <= 4:
104
+ return "****"
105
+ return value[:2] + "****" + value[-2:]
106
+
107
+
108
def _handle_config_show(args: argparse.Namespace) -> None:
    """Print a JSON report of the resolved workspace and environment settings.

    Reports which source determined the workspace (env var, global config,
    env override when they disagree, or cwd fallback), the keys present in
    the workspace .env file, and the Postgres admin settings with the
    password masked. *args* is accepted only to match the handler signature.
    """
    # Show how workspace was resolved.
    ws_env = (os.environ.get("CLOUDSIM_WORKSPACE") or "").strip()
    cfg = read_global_config()
    ws_cfg = (cfg.get("workspace") or "").strip() if isinstance(cfg, dict) else ""

    # Prefer reporting the *stable* source a new user relies on.
    # If both env and default config exist but disagree, treat env as an override.
    if ws_env and ws_cfg and Path(ws_env).expanduser().resolve() != Path(ws_cfg).expanduser().resolve():
        source = "env_override"
    elif ws_cfg:
        source = "default_config"
    elif ws_env:
        source = "env"
    else:
        source = "cwd"

    ws = workspace_root()
    env_path = ws / ".env"

    # Parse .env without mutating env (for display).
    env_keys: list[str] = []
    if env_path.exists():
        for raw_line in env_path.read_text(encoding="utf-8").splitlines():
            line = raw_line.strip()
            # Skip blanks, comments, and malformed (no '=') lines.
            if not line or line.startswith("#"):
                continue
            if "=" not in line:
                continue
            k = line.split("=", 1)[0].strip()
            if k:
                env_keys.append(k)

    # Load it for this process too (safe: env_loader won't override explicit env vars).
    load_env_file(env_path)

    def getenv(k: str) -> str:
        # Normalize missing vars to "" so display and all() checks are simple.
        return (os.environ.get(k) or "").strip()

    pg = {
        "host": getenv("CLOUDSIM_PG_HOST"),
        "port": getenv("CLOUDSIM_PG_PORT"),
        "adminUser": getenv("CLOUDSIM_PG_ADMIN_USER"),
        "adminPassword": _mask(getenv("CLOUDSIM_PG_ADMIN_PASSWORD")),
        "adminDb": getenv("CLOUDSIM_PG_ADMIN_DB"),
    }

    _print(
        {
            "ok": True,
            "workspaceSource": source,
            "workspace": str(ws),
            "envFile": str(env_path),
            "envFileExists": env_path.exists(),
            "envFileKeys": sorted(list(set(env_keys))),
            # Use the raw (unmasked) password here so masking can't skew the check.
            "postgresConfigured": all(
                [
                    pg["host"],
                    pg["port"],
                    pg["adminUser"],
                    getenv("CLOUDSIM_PG_ADMIN_PASSWORD"),
                    pg["adminDb"],
                ]
            ),
            "postgres": pg,
        }
    )