wafer-cli 0.2.39.tar.gz → 0.2.41.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/PKG-INFO +1 -1
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/pyproject.toml +1 -1
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/cli.py +32 -16
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/workspaces.py +96 -2
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer_cli.egg-info/PKG-INFO +1 -1
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/README.md +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/setup.cfg +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_analytics.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_auth.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_billing.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_cli_coverage.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_cli_parity_integration.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_config_integration.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_file_operations_integration.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_kernel_scope_cli.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_nsys_analyze.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_nsys_profile.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_output.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_rocprof_compute_integration.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_skill_commands.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_ssh_integration.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_targets_ops.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_wevin_cli.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/tests/test_workflow_integration.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/GUIDE.md +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/__init__.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/agent_defaults.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/analytics.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/api_client.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/auth.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/autotuner.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/baseline.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/billing.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/cli_instructions.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/config.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/corpus.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/evaluate.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/global_config.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/gpu_run.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/inference.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/kernel_scope.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/ncu_analyze.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/nsys_analyze.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/nsys_profile.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/output.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/problems.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/rocprof_compute.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/rocprof_sdk.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/rocprof_systems.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/skills/wafer-guide/SKILL.md +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/specs_cli.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/ssh_keys.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/target_lock.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/targets.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/targets_cli.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/targets_ops.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/templates/__init__.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/templates/aiter_optimize.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/templates/ask_docs.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/templates/optimize_kernel.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/templates/optimize_kernelbench.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/templates/optimize_vllm.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/templates/trace_analyze.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/tests/test_eval_cli_parity.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/trace_compare.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/tracelens.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer/wevin_cli.py +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer_cli.egg-info/SOURCES.txt +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer_cli.egg-info/dependency_links.txt +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer_cli.egg-info/entry_points.txt +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer_cli.egg-info/requires.txt +0 -0
- {wafer_cli-0.2.39 → wafer_cli-0.2.41}/wafer_cli.egg-info/top_level.txt +0 -0
wafer/cli.py (+32 -16)

@@ -220,16 +220,22 @@ workspaces_app = typer.Typer(
 
     Workspaces are on-demand cloud GPU environments. Requires authentication (wafer login).
 
+    Environment Types:
+        modal      Serverless GPU execution (fast startup, no SSH)
+        baremetal  Dedicated GPU server (SSH access, hardware counters)
+
     Available GPUs:
-        MI300X  AMD Instinct MI300X (192GB HBM3, ROCm)
+        MI300X  AMD Instinct MI300X (192GB HBM3, ROCm) - baremetal only
         B200    NVIDIA Blackwell B200 (180GB HBM3e, CUDA)
+        H100    NVIDIA Hopper H100 (80GB HBM3, CUDA)
 
     Commands:
-        wafer workspaces create dev
-        wafer workspaces
-        wafer workspaces
-        wafer workspaces
-        wafer workspaces
+        wafer workspaces create dev -g B200 -e modal      # Create Modal workspace
+        wafer workspaces create dev -g B200 -e baremetal  # Create baremetal workspace
+        wafer workspaces exec dev -- python x.py          # Run commands
+        wafer workspaces ssh dev                          # Interactive SSH (baremetal only)
+        wafer workspaces sync dev ./project               # Sync files
+        wafer workspaces delete dev                       # Clean up"""
 )
 app.add_typer(workspaces_app, name="workspaces")
 
@@ -3272,7 +3278,7 @@ def demo_eval(
     # Step 1: Create workspace
     typer.echo(f"\n[1/4] Creating workspace '{workspace_name}'...")
     result = subprocess.run(
-        ["wafer", "workspaces", "create", workspace_name, "--gpu", "B200", "--json"],
+        ["wafer", "workspaces", "create", workspace_name, "--gpu", "B200", "--environment", "modal", "--json"],
         capture_output=True,
         text=True,
         check=True,
@@ -3359,7 +3365,7 @@ print(f"Performance: {(t1-t0)/100*1e6:.1f} us/iter")
     typer.echo("\n✓ Demo complete! To evaluate your own kernels:")
     typer.echo("")
     typer.echo("  # Using workspaces (no setup required):")
-    typer.echo("  wafer workspaces create dev --gpu B200")
+    typer.echo("  wafer workspaces create dev --gpu B200 --environment modal")
     typer.echo("  wafer workspaces exec --sync ./my-kernel dev -- python my_test.py")
     typer.echo("")
     typer.echo("  # Or using wafer evaluate with a configured target:")
@@ -4816,7 +4822,10 @@ def workspaces_list(
 def workspaces_create(
     name: str = typer.Argument(..., help="Workspace name"),
     gpu_type: str = typer.Option(
-
+        ..., "--gpu", "-g", help="GPU type: MI300X (AMD) or B200/H100 (NVIDIA)"
+    ),
+    environment: str = typer.Option(
+        ..., "--environment", "-e", help="Environment type: modal or baremetal"
     ),
     image: str | None = typer.Option(None, "--image", "-i", help="Docker image (optional)"),
     wait: bool = typer.Option(
@@ -4826,16 +4835,22 @@ def workspaces_create(
 ) -> None:
     """Create a new workspace.
 
+    Per-vendor architecture: each workspace has a single environment type.
+
+    Environment Types:
+        modal      Serverless GPU execution (fast startup, no SSH)
+        baremetal  Dedicated GPU server (SSH access, hardware counters)
+
     Available GPUs:
-        MI300X
-        B200
+        MI300X  AMD Instinct MI300X (192GB HBM3, ROCm) - baremetal only
+        B200    NVIDIA Blackwell B200 (180GB HBM3e, CUDA)
+        H100    NVIDIA Hopper H100 (80GB HBM3, CUDA)
 
     Example:
-        wafer workspaces create my-kernel
-        wafer workspaces create my-kernel --gpu MI300X
-        wafer workspaces create my-kernel
-        wafer workspaces create my-kernel
-        wafer workspaces create my-kernel --wait
+        wafer workspaces create my-kernel --gpu B200 --environment modal
+        wafer workspaces create my-kernel --gpu MI300X --environment baremetal
+        wafer workspaces create my-kernel -g B200 -e baremetal  # SSH + ncu profiling
+        wafer workspaces create my-kernel -g B200 -e modal --wait
     """
     from .workspaces import create_workspace
 
@@ -4843,6 +4858,7 @@ def workspaces_create(
     result = create_workspace(
         name,
         gpu_type=gpu_type,
+        environment_type=environment,
         image=image,
         wait=wait,
         json_output=json_output,
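Taken together, the cli.py hunks make `--gpu/-g` and the new `--environment/-e` flag required options and forward both to the library. A minimal sketch of the resulting call path, assuming an authenticated session; the workspace name and option values below are illustrative, not taken from the diff:

```python
# Sketch of what `wafer workspaces create dev -g B200 -e modal` now does under
# the hood: the CLI forwards both options to wafer.workspaces.create_workspace.
from wafer.workspaces import create_workspace

result = create_workspace(
    "dev",                     # workspace name (example value)
    gpu_type="B200",           # B200, H100, or MI300X
    environment_type="modal",  # new required argument: "modal" or "baremetal"
    image=None,                # optional Docker image
    wait=False,                # stream provisioning progress if True
    json_output=True,          # return raw JSON instead of formatted text
)
print(result)
```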
wafer/workspaces.py (+96 -2)

@@ -249,16 +249,20 @@ def list_workspaces(json_output: bool = False) -> str:
 
 def create_workspace(
     name: str,
-    gpu_type: str
+    gpu_type: str,
+    environment_type: str,
     image: str | None = None,
     wait: bool = False,
     json_output: bool = False,
 ) -> str:
     """Create a new workspace.
 
+    Per-vendor architecture: each workspace has a single environment type.
+
     Args:
         name: Workspace name (must be unique)
-        gpu_type: GPU type (
+        gpu_type: GPU type (required: B200, H100, MI300X)
+        environment_type: Environment type (required: modal, baremetal)
         image: Docker image (optional, uses default if not specified)
         wait: If True, stream provisioning progress and return SSH credentials
         json_output: If True, return raw JSON; otherwise return formatted text
@@ -272,6 +276,7 @@ def create_workspace(
     # Validate inputs
     assert name, "Workspace name must be non-empty"
     assert gpu_type, "GPU type must be non-empty"
+    assert environment_type, "Environment type must be non-empty"
 
     api_url, headers = _get_client()
 
@@ -296,6 +301,7 @@ def create_workspace(
     request_body: dict = {
         "name": name,
         "gpu_type": gpu_type,
+        "environment_type": environment_type,
     }
     if image:
         request_body["image"] = image
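For reference, this is roughly the request body `create_workspace` now sends; the endpoint URL and headers come from `_get_client()` and are not part of this diff, and the values below are only examples:

```python
import json

# Illustrative payload after the change; "image" is included only when set.
request_body: dict = {
    "name": "my-kernel",              # example workspace name
    "gpu_type": "MI300X",             # B200, H100, or MI300X
    "environment_type": "baremetal",  # new required field: "modal" or "baremetal"
}
print(json.dumps(request_body, indent=2))
```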
@@ -838,6 +844,73 @@ def _parse_sse_content(content: str) -> SSEEvent:
     return SSEEvent(output=content, exit_code=None, is_error=False)
 
 
+def _exec_via_ssh(ssh_host: str, ssh_port: int, ssh_user: str, command: str) -> int:
+    """Execute command via SSH, streaming output to stdout/stderr.
+
+    Used for baremetal workspaces. The workspace's zsh plugin handles GPU routing.
+
+    Returns:
+        Exit code from remote command
+    """
+    import selectors
+    import shlex
+    import subprocess
+    import sys
+
+    assert ssh_host, "SSH host required"
+    assert ssh_port > 0, "SSH port must be positive"
+    assert ssh_user, "SSH user required"
+    assert command, "Command required"
+
+    ssh_cmd = [
+        "ssh",
+        "-p", str(ssh_port),
+        "-t",  # Force TTY for zsh plugin to work
+        "-o", "StrictHostKeyChecking=no",
+        "-o", "UserKnownHostsFile=/dev/null",
+        "-o", "BatchMode=yes",
+        "-o", "LogLevel=ERROR",
+        f"{ssh_user}@{ssh_host}",
+        f"zsh -i -l -c {shlex.quote(command)}",  # Interactive login shell to load plugins
+    ]
+
+    process = subprocess.Popen(
+        ssh_cmd,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        text=True,
+    )
+
+    assert process.stdout is not None
+    assert process.stderr is not None
+
+    sel = selectors.DefaultSelector()
+    sel.register(process.stdout, selectors.EVENT_READ)
+    sel.register(process.stderr, selectors.EVENT_READ)
+
+    while True:
+        for key, _ in sel.select(timeout=0.1):
+            line = key.fileobj.readline()  # type: ignore
+            if line:
+                if key.fileobj == process.stdout:
+                    print(line, end="", file=sys.stdout)
+                else:
+                    print(line, end="", file=sys.stderr)
+
+        if process.poll() is not None:
+            for line in process.stdout:
+                print(line, end="", file=sys.stdout)
+            for line in process.stderr:
+                print(line, end="", file=sys.stderr)
+            sys.stdout.flush()
+            sys.stderr.flush()
+            break
+
+    sel.close()
+    assert process.returncode is not None, "SSH process did not terminate properly"
+    return process.returncode
+
+
 def exec_command(
     workspace_id: str,
     command: str,
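To make the new helper's behavior concrete, the sketch below reproduces the ssh invocation `_exec_via_ssh` assembles; the host, port, and user are placeholder values, while the flags and quoting mirror the hunk above:

```python
import shlex

# Placeholder connection details; a real baremetal workspace supplies these
# via its ssh_host / ssh_port / ssh_user fields.
ssh_host, ssh_port, ssh_user = "203.0.113.7", 2222, "wafer"
command = "python bench.py"

ssh_cmd = [
    "ssh",
    "-p", str(ssh_port),
    "-t",                                    # force a TTY so the zsh plugin loads
    "-o", "StrictHostKeyChecking=no",
    "-o", "UserKnownHostsFile=/dev/null",
    "-o", "BatchMode=yes",
    "-o", "LogLevel=ERROR",
    f"{ssh_user}@{ssh_host}",
    f"zsh -i -l -c {shlex.quote(command)}",  # interactive login shell on the remote end
]
print(" ".join(ssh_cmd))
# ssh -p 2222 -t -o StrictHostKeyChecking=no ... wafer@203.0.113.7 zsh -i -l -c 'python bench.py'
```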
@@ -847,11 +920,18 @@ def exec_command(
 ) -> int:
     """Execute a command in workspace, streaming output.
 
+    For baremetal workspaces (with SSH access), commands are executed via SSH.
+    The workspace's zsh plugin handles GPU routing automatically, ensuring
+    packages installed via pip persist across commands.
+
+    For Modal workspaces (no SSH), commands are executed via the API.
+
     Args:
         workspace_id: Workspace ID or name
         command: Command to execute
         timeout_seconds: Execution timeout (default: 300, from config)
         routing: Routing hint - "auto", "gpu", "cpu", or "baremetal" (default: auto)
+        pull_image: Pull image on target if missing (only for API exec)
 
     Returns:
         Exit code (0 = success, non-zero = failure)
@@ -862,6 +942,20 @@ def exec_command(
     assert workspace_id, "Workspace ID must be non-empty"
     assert command, "Command must be non-empty"
 
+    # Get workspace details to check if SSH is available (baremetal)
+    workspace = get_workspace_raw(workspace_id)
+    ssh_host = workspace.get("ssh_host")
+    ssh_port = workspace.get("ssh_port")
+    ssh_user = workspace.get("ssh_user")
+
+    # Baremetal workspaces have SSH access - use SSH for stateful execution
+    # This ensures pip installs persist because we're in the workspace container
+    # The zsh plugin still handles GPU routing for GPU commands
+    if ssh_host and ssh_port and ssh_user:
+        return _exec_via_ssh(ssh_host, ssh_port, ssh_user, command)
+
+    # Modal workspaces (no SSH) - use API exec
+    # Modal Named Sandboxes persist state within their lifetime
     api_url, headers = _get_client()
 
     # Base64 encode command to avoid escaping issues
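From the caller's side the routing is transparent: the same `exec_command` call works for both environment types, and the SSH branch is taken only when the workspace record exposes `ssh_host`, `ssh_port`, and `ssh_user`. A usage sketch, assuming the remaining parameters keep the defaults described in the docstring and that the workspace name below exists:

```python
from wafer.workspaces import exec_command

# "dev" is an example workspace name; exec_command accepts an ID or name.
# Baremetal -> output streamed over SSH; Modal -> streamed from the exec API.
exit_code = exec_command("dev", "python my_test.py")
if exit_code != 0:
    raise SystemExit(exit_code)
```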