skypilot-nightly 1.0.0.dev20240926__py3-none-any.whl → 1.0.0.dev20240928__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sky/__init__.py +2 -2
- sky/adaptors/kubernetes.py +10 -8
- sky/authentication.py +10 -6
- sky/backends/backend_utils.py +1 -0
- sky/backends/cloud_vm_ray_backend.py +14 -1
- sky/cli.py +129 -19
- sky/clouds/kubernetes.py +161 -45
- sky/clouds/oci.py +11 -8
- sky/clouds/service_catalog/kubernetes_catalog.py +15 -7
- sky/provision/kubernetes/network.py +34 -14
- sky/provision/kubernetes/network_utils.py +7 -5
- sky/provision/kubernetes/utils.py +125 -59
- sky/provision/provisioner.py +2 -0
- sky/templates/kubernetes-ray.yml.j2 +1 -1
- sky/utils/command_runner.py +4 -0
- sky/utils/kubernetes/deploy_remote_cluster.sh +243 -0
- sky/utils/log_utils.py +88 -10
- sky/utils/schemas.py +6 -0
- {skypilot_nightly-1.0.0.dev20240926.dist-info → skypilot_nightly-1.0.0.dev20240928.dist-info}/METADATA +16 -15
- {skypilot_nightly-1.0.0.dev20240926.dist-info → skypilot_nightly-1.0.0.dev20240928.dist-info}/RECORD +24 -23
- {skypilot_nightly-1.0.0.dev20240926.dist-info → skypilot_nightly-1.0.0.dev20240928.dist-info}/LICENSE +0 -0
- {skypilot_nightly-1.0.0.dev20240926.dist-info → skypilot_nightly-1.0.0.dev20240928.dist-info}/WHEEL +0 -0
- {skypilot_nightly-1.0.0.dev20240926.dist-info → skypilot_nightly-1.0.0.dev20240928.dist-info}/entry_points.txt +0 -0
- {skypilot_nightly-1.0.0.dev20240926.dist-info → skypilot_nightly-1.0.0.dev20240928.dist-info}/top_level.txt +0 -0
sky/utils/kubernetes/deploy_remote_cluster.sh
ADDED
@@ -0,0 +1,243 @@
+#!/bin/bash
+# Refer to https://skypilot.readthedocs.io/en/latest/reservations/existing-machines.html for details on how to use this script.
+set -e
+
+# Colors for nicer UX
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m' # No color
+
+# Variables
+IPS_FILE=$1
+USER=$2
+SSH_KEY=$3
+K3S_TOKEN=mytoken # Any string can be used as the token
+CLEANUP=false
+INSTALL_GPU=false
+
+if [[ "$4" == "--cleanup" ]]; then
+  CLEANUP=true
+fi
+
+# Basic argument checks
+if [ -z "$IPS_FILE" ] || [ -z "$USER" ] || [ -z "$SSH_KEY" ]; then
+  >&2 echo -e "${RED}Error: Missing required arguments.${NC}"
+  >&2 echo "Usage: ./deploy_remote_cluster.sh ips.txt username path/to/ssh/key [--cleanup]"
+  exit 1
+fi
+
+# Check if SSH key exists
+if [ ! -f "$SSH_KEY" ]; then
+  >&2 echo -e "${RED}Error: SSH key not found: $SSH_KEY${NC}"
+  exit 1
+fi
+
+# Check if IPs file exists
+if [ ! -f "$IPS_FILE" ]; then
+  >&2 echo -e "${RED}Error: IPs file not found: $IPS_FILE${NC}"
+  exit 1
+fi
+
+# Get head node and worker nodes from the IPs file
+HEAD_NODE=$(head -n 1 "$IPS_FILE")
+WORKER_NODES=$(tail -n +2 "$IPS_FILE")
+
+# Check if the IPs file is empty or not formatted correctly
+if [ -z "$HEAD_NODE" ]; then
+  >&2 echo -e "${RED}Error: IPs file is empty or not formatted correctly.${NC}"
+  exit 1
+fi
+
+# Function to show a progress message
+progress_message() {
+  echo -e "${YELLOW}➜ $1${NC}"
+}
+
+# Step to display success
+success_message() {
+  echo -e "${GREEN}✔ $1${NC}"
+}
+
+# Function to run a command on a remote machine via SSH
+run_remote() {
+  local NODE_IP=$1
+  local CMD=$2
+  # echo -e "${YELLOW}Running command on $NODE_IP...${NC}"
+  ssh -o StrictHostKeyChecking=no -i "$SSH_KEY" "$USER@$NODE_IP" "$CMD"
+}
+
+# Function to uninstall k3s and clean up the state on a remote machine
+cleanup_server_node() {
+  local NODE_IP=$1
+  echo -e "${YELLOW}Cleaning up head node $NODE_IP...${NC}"
+  run_remote "$NODE_IP" "
+    echo 'Uninstalling k3s...' &&
+    /usr/local/bin/k3s-uninstall.sh || true &&
+    sudo rm -rf /etc/rancher /var/lib/rancher /var/lib/kubelet /etc/kubernetes ~/.kube
+  "
+  echo -e "${GREEN}Node $NODE_IP cleaned up successfully.${NC}"
+}
+
+# Function to uninstall k3s and clean up the state on a remote machine
+cleanup_agent_node() {
+  local NODE_IP=$1
+  echo -e "${YELLOW}Cleaning up node $NODE_IP...${NC}"
+  run_remote "$NODE_IP" "
+    echo 'Uninstalling k3s...' &&
+    /usr/local/bin/k3s-agent-uninstall.sh || true &&
+    sudo rm -rf /etc/rancher /var/lib/rancher /var/lib/kubelet /etc/kubernetes ~/.kube
+  "
+  echo -e "${GREEN}Node $NODE_IP cleaned up successfully.${NC}"
+}
+
+check_gpu() {
+  local NODE_IP=$1
+  run_remote "$NODE_IP" "
+    if command -v nvidia-smi &> /dev/null; then
+      nvidia-smi --list-gpus | grep 'GPU 0'
+    fi
+  "
+}
+
+# Pre-flight checks
+run_remote "$HEAD_NODE" "echo 'SSH connection successful'"
+# TODO: Add more pre-flight checks here, including checking if port 6443 is accessible
+
+# If --cleanup flag is set, uninstall k3s and exit
+if [ "$CLEANUP" == "true" ]; then
+  echo -e "${YELLOW}Starting cleanup...${NC}"
+
+  # Clean up head node
+  cleanup_server_node "$HEAD_NODE"
+
+  # Clean up worker nodes
+  for NODE in $WORKER_NODES; do
+    cleanup_agent_node "$NODE"
+  done
+
+  echo -e "${GREEN}Cleanup completed successfully.${NC}"
+  exit 0
+fi
+
+# Step 1: Install k3s on the head node
+progress_message "Deploying Kubernetes on head node ($HEAD_NODE)..."
+run_remote "$HEAD_NODE" "
+  curl -sfL https://get.k3s.io | K3S_TOKEN=$K3S_TOKEN sh - &&
+  mkdir -p ~/.kube &&
+  sudo cp /etc/rancher/k3s/k3s.yaml ~/.kube/config &&
+  sudo chown \$(id -u):\$(id -g) ~/.kube/config &&
+  for i in {1..3}; do
+    if kubectl wait --for=condition=ready node --all --timeout=2m --kubeconfig ~/.kube/config; then
+      break
+    else
+      echo 'Waiting for nodes to be ready...'
+      sleep 5
+    fi
+  done
+  if [ $i -eq 3 ]; then
+    echo 'Failed to wait for nodes to be ready after 3 attempts'
+    exit 1
+  fi"
+success_message "K3s deployed on head node."
+
+# Check if head node has a GPU
+if check_gpu "$HEAD_NODE"; then
+  echo -e "${YELLOW}GPU detected on head node ($HEAD_NODE).${NC}"
+  INSTALL_GPU=true
+fi
+
+# Fetch the head node's internal IP (this will be passed to worker nodes)
+MASTER_ADDR=$(run_remote "$HEAD_NODE" "hostname -I | awk '{print \$1}'")
+
+echo -e "${GREEN}Master node internal IP: $MASTER_ADDR${NC}"
+
+# Step 2: Install k3s on worker nodes and join them to the master node
+for NODE in $WORKER_NODES; do
+  progress_message "Deploying Kubernetes on worker node ($NODE)..."
+  run_remote "$NODE" "
+    curl -sfL https://get.k3s.io | K3S_URL=https://$MASTER_ADDR:6443 K3S_TOKEN=$K3S_TOKEN sh -"
+  success_message "Kubernetes deployed on worker node ($NODE)."
+
+  # Check if worker node has a GPU
+  if check_gpu "$NODE"; then
+    echo -e "${YELLOW}GPU detected on worker node ($NODE).${NC}"
+    INSTALL_GPU=true
+  fi
+done
+# Step 3: Configure local kubectl to connect to the cluster
+progress_message "Configuring local kubectl to connect to the cluster..."
+scp -o StrictHostKeyChecking=no -i "$SSH_KEY" "$USER@$HEAD_NODE":~/.kube/config ~/.kube/config
+
+# Back up the original kubeconfig file if it exists
+KUBECONFIG_FILE="$HOME/.kube/config"
+if [[ -f "$KUBECONFIG_FILE" ]]; then
+  echo "Backing up existing kubeconfig to $KUBECONFIG_FILE.bak"
+  cp "$KUBECONFIG_FILE" "$KUBECONFIG_FILE.bak"
+fi
+
+# Update kubeconfig for the local machine to use the master node's IP
+# Temporary file to hold the modified kubeconfig
+TEMP_FILE=$(mktemp)
+
+# Remove the certificate-authority-data, and replace the server with the master address
+awk '
+  BEGIN { in_cluster = 0 }
+  /^clusters:/ { in_cluster = 1 }
+  /^users:/ { in_cluster = 0 }
+  in_cluster && /^ *certificate-authority-data:/ { next }
+  in_cluster && /^ *server:/ {
+    print "    server: https://'${HEAD_NODE}:6443'"
+    print "    insecure-skip-tls-verify: true"
+    next
+  }
+  { print }
+' "$KUBECONFIG_FILE" > "$TEMP_FILE"
+
+# Replace the original kubeconfig with the modified one
+mv "$TEMP_FILE" "$KUBECONFIG_FILE"
+
+success_message "kubectl configured to connect to the cluster."
+
+echo "Cluster deployment completed. You can now run 'kubectl get nodes' to verify the setup."
+
+# Install GPU operator if a GPU was detected on any node
+if [ "$INSTALL_GPU" == "true" ]; then
+  echo -e "${YELLOW}GPU detected in the cluster. Installing Nvidia GPU Operator...${NC}"
+  run_remote "$HEAD_NODE" "
+    curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 &&
+    chmod 700 get_helm.sh &&
+    ./get_helm.sh &&
+    helm repo add nvidia https://helm.ngc.nvidia.com/nvidia && helm repo update &&
+    kubectl create namespace gpu-operator --kubeconfig ~/.kube/config || true &&
+    sudo ln -s /sbin/ldconfig /sbin/ldconfig.real || true &&
+    helm install gpu-operator -n gpu-operator --create-namespace nvidia/gpu-operator \
+      --set 'toolkit.env[0].name=CONTAINERD_CONFIG' \
+      --set 'toolkit.env[0].value=/var/lib/rancher/k3s/agent/etc/containerd/config.toml' \
+      --set 'toolkit.env[1].name=CONTAINERD_SOCKET' \
+      --set 'toolkit.env[1].value=/run/k3s/containerd/containerd.sock' \
+      --set 'toolkit.env[2].name=CONTAINERD_RUNTIME_CLASS' \
+      --set 'toolkit.env[2].value=nvidia' &&
+    echo 'Waiting for GPU operator installation...' &&
+    while ! kubectl describe nodes --kubeconfig ~/.kube/config | grep -q 'nvidia.com/gpu:'; do
+      echo 'Waiting for GPU operator...'
+      sleep 5
+    done
+    echo 'GPU operator installed successfully.'"
+  success_message "GPU Operator installed."
+else
+  echo -e "${YELLOW}No GPUs detected. Skipping GPU Operator installation.${NC}"
+fi
+
+# Configure SkyPilot
+progress_message "Configuring SkyPilot..."
+sky check kubernetes
+success_message "SkyPilot configured successfully."
+
+# Display final success message
+echo -e "${GREEN}==== 🎉 Kubernetes cluster deployment completed successfully 🎉 ====${NC}"
+echo "You can now interact with your Kubernetes cluster through SkyPilot: "
+echo "  • List available GPUs: sky show-gpus --cloud kubernetes"
+echo "  • Launch a GPU development pod: sky launch -c devbox --cloud kubernetes --gpus A100:1"
+echo "  • Connect to pod with SSH: ssh devbox"
+echo "  • Connect to pod with VSCode: code --remote ssh-remote+devbox '/'"
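The new script's own usage message suggests an invocation along the following lines; this is only a sketch, and the IP addresses, username, and key path are illustrative placeholders rather than values taken from this release:

# Hypothetical ips.txt: the first IP becomes the head node, the rest become workers.
cat > ips.txt <<'EOF'
192.168.1.10
192.168.1.11
192.168.1.12
EOF

# Deploy k3s on all machines, point local kubectl at the cluster, and run `sky check kubernetes`.
bash deploy_remote_cluster.sh ips.txt ubuntu ~/.ssh/id_rsa

# Later, uninstall k3s from every node.
bash deploy_remote_cluster.sh ips.txt ubuntu ~/.ssh/id_rsa --cleanup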
sky/utils/log_utils.py
CHANGED
@@ -1,6 +1,7 @@
 """Logging utils."""
 import enum
-
+import types
+from typing import List, Optional, Type

 import colorama
 import pendulum
@@ -15,13 +16,15 @@ logger = sky_logging.init_logger(__name__)
 class LineProcessor(object):
     """A processor for log lines."""

-    def __enter__(self):
+    def __enter__(self) -> None:
         pass

-    def process_line(self, log_line):
+    def process_line(self, log_line: str) -> None:
         pass

-    def __exit__(self, except_type
+    def __exit__(self, except_type: Optional[Type[BaseException]],
+                 except_value: Optional[BaseException],
+                 traceback: Optional[types.TracebackType]) -> None:
         del except_type, except_value, traceback  # unused
         pass

@@ -34,12 +37,12 @@ class RayUpLineProcessor(LineProcessor):
     RUNTIME_SETUP = 1
     PULLING_DOCKER_IMAGES = 2

-    def __enter__(self):
+    def __enter__(self) -> None:
         self.state = self.ProvisionStatus.LAUNCH
         self.status_display = rich_utils.safe_status('[bold cyan]Launching')
         self.status_display.start()

-    def process_line(self, log_line):
+    def process_line(self, log_line: str) -> None:
         if ('Success.' in log_line and
                 self.state == self.ProvisionStatus.LAUNCH):
             logger.info(f'{colorama.Fore.GREEN}Head node is up.'
@@ -60,7 +63,9 @@ class RayUpLineProcessor(LineProcessor):
                 '[bold cyan]Launching - Preparing SkyPilot runtime')
             self.state = self.ProvisionStatus.RUNTIME_SETUP

-    def __exit__(self, except_type
+    def __exit__(self, except_type: Optional[Type[BaseException]],
+                 except_value: Optional[BaseException],
+                 traceback: Optional[types.TracebackType]) -> None:
         del except_type, except_value, traceback  # unused
         self.status_display.stop()

@@ -68,13 +73,13 @@ class RayUpLineProcessor(LineProcessor):
 class SkyLocalUpLineProcessor(LineProcessor):
     """A processor for `sky local up` log lines."""

-    def __enter__(self):
+    def __enter__(self) -> None:
         status = rich_utils.safe_status('[bold cyan]Creating local cluster - '
                                         'initializing Kubernetes')
         self.status_display = status
         self.status_display.start()

-    def process_line(self, log_line):
+    def process_line(self, log_line: str) -> None:
         if 'Kind cluster created.' in log_line:
             logger.info(f'{colorama.Fore.GREEN}Kubernetes is running.'
                         f'{colorama.Style.RESET_ALL}')
@@ -124,7 +129,80 @@ class SkyLocalUpLineProcessor(LineProcessor):
                 f'{colorama.Fore.GREEN}Nginx Ingress Controller installed.'
                 f'{colorama.Style.RESET_ALL}')

-    def __exit__(self, except_type
+    def __exit__(self, except_type: Optional[Type[BaseException]],
+                 except_value: Optional[BaseException],
+                 traceback: Optional[types.TracebackType]) -> None:
+        del except_type, except_value, traceback  # unused
+        self.status_display.stop()
+
+
+class SkyRemoteUpLineProcessor(LineProcessor):
+    """A processor for deploy_remote_cluster.sh log lines."""
+
+    def __enter__(self) -> None:
+        status = rich_utils.safe_status('[bold cyan]Creating remote cluster')
+        self.status_display = status
+        self.status_display.start()
+
+    def process_line(self, log_line: str) -> None:
+        # Pre-flight checks
+        if 'SSH connection successful' in log_line:
+            logger.info(f'{colorama.Fore.GREEN}SSH connection established.'
+                        f'{colorama.Style.RESET_ALL}')
+
+        # Kubernetes installation steps
+        if 'Deploying Kubernetes on head node' in log_line:
+            self.status_display.update('[bold cyan]Creating remote cluster - '
+                                       'deploying Kubernetes on head node')
+        if 'K3s deployed on head node.' in log_line:
+            logger.info(f'{colorama.Fore.GREEN}'
+                        '✔ K3s successfully deployed on head node.'
+                        f'{colorama.Style.RESET_ALL}')
+
+        # Worker nodes
+        if 'Deploying Kubernetes on worker node' in log_line:
+            self.status_display.update('[bold cyan]Creating remote cluster - '
+                                       'deploying Kubernetes on worker nodes')
+        if 'Kubernetes deployed on worker node' in log_line:
+            logger.info(f'{colorama.Fore.GREEN}'
+                        '✔ K3s successfully deployed on worker node.'
+                        f'{colorama.Style.RESET_ALL}')
+
+        # Cluster configuration
+        if 'Configuring local kubectl to connect to the cluster...' in log_line:
+            self.status_display.update('[bold cyan]Creating remote cluster - '
+                                       'configuring local kubectl')
+        if 'kubectl configured to connect to the cluster.' in log_line:
+            logger.info(f'{colorama.Fore.GREEN}'
+                        '✔ kubectl configured for the remote cluster.'
+                        f'{colorama.Style.RESET_ALL}')
+
+        # GPU operator installation
+        if 'Installing Nvidia GPU Operator...' in log_line:
+            self.status_display.update('[bold cyan]Creating remote cluster - '
+                                       'installing Nvidia GPU Operator')
+        if 'GPU Operator installed.' in log_line:
+            logger.info(f'{colorama.Fore.GREEN}'
+                        '✔ Nvidia GPU Operator installed successfully.'
+                        f'{colorama.Style.RESET_ALL}')
+
+        # Cleanup steps
+        if 'Cleaning up head node' in log_line:
+            self.status_display.update('[bold cyan]Cleaning up head node')
+        if 'Cleaning up node' in log_line:
+            self.status_display.update('[bold cyan]Cleaning up worker node')
+        if 'cleaned up successfully' in log_line:
+            logger.info(f'{colorama.Fore.GREEN}'
+                        f'{log_line.strip()}{colorama.Style.RESET_ALL}')
+
+        # Final status
+        if 'Cluster deployment completed.' in log_line:
+            logger.info(f'{colorama.Fore.GREEN}✔ Remote k3s is running.'
+                        f'{colorama.Style.RESET_ALL}')
+
+    def __exit__(self, except_type: Optional[Type[BaseException]],
+                 except_value: Optional[BaseException],
+                 traceback: Optional[types.TracebackType]) -> None:
         del except_type, except_value, traceback  # unused
         self.status_display.stop()
sky/utils/schemas.py
CHANGED
@@ -775,6 +775,12 @@ def get_config_schema():
             'required': [],
             'additionalProperties': False,
             'properties': {
+                'allowed_contexts': {
+                    'type': 'array',
+                    'items': {
+                        'type': 'string',
+                    },
+                },
                 'networking': {
                     'type': 'string',
                     'case_insensitive_enum': [
{skypilot_nightly-1.0.0.dev20240926.dist-info → skypilot_nightly-1.0.0.dev20240928.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: skypilot-nightly
-Version: 1.0.0.
+Version: 1.0.0.dev20240928
 Summary: SkyPilot: An intercloud broker for the clouds
 Author: SkyPilot Team
 License: Apache 2.0
@@ -153,27 +153,27 @@ Requires-Dist: pyvmomi==8.0.1.0.2; extra == "vsphere"

 ----
 :fire: *News* :fire:
-- [Sep, 2024] Point,
-- [Sep, 2024] Run and deploy [Pixtral](./llm/pixtral), the first open-source multimodal model from Mistral AI.
-- [Jul, 2024] [Finetune](./llm/llama-3_1-finetuning/) and [serve](./llm/llama-3_1/) **Llama 3.1** on your infra
+- [Sep, 2024] Point, Launch and Serve **Llama 3.2** on on Kubernetes or Any Cloud: [**example**](./llm/llama-3_2/)
+- [Sep, 2024] Run and deploy [**Pixtral**](./llm/pixtral), the first open-source multimodal model from Mistral AI.
+- [Jul, 2024] [**Finetune**](./llm/llama-3_1-finetuning/) and [**serve**](./llm/llama-3_1/) **Llama 3.1** on your infra
 - [Jun, 2024] Reproduce **GPT** with [llm.c](https://github.com/karpathy/llm.c/discussions/481) on any cloud: [**guide**](./llm/gpt-2/)
-- [Apr, 2024] Serve
-- [Apr, 2024] Using
-- [Feb, 2024] Deploying and scaling
-- [Feb, 2024] Serving
-- [Dec, 2023]
-- [Nov, 2023] Using
-- [Sep, 2023] Case study: [**Covariant**](https://covariant.ai/) transformed AI development on the cloud using SkyPilot, delivering models 4x faster cost-effectively: [**read the case study**](https://blog.skypilot.co/covariant/)
-- [Aug, 2023] **Finetuning Cookbook**: Finetuning Llama 2 in your own cloud environment, privately: [**example**](./llm/vicuna-llama-2/), [**blog post**](https://blog.skypilot.co/finetuning-llama2-operational-guide/)
+- [Apr, 2024] Serve **Qwen-110B** on your infra: [**example**](./llm/qwen/)
+- [Apr, 2024] Using **Ollama** to deploy quantized LLMs on CPUs and GPUs: [**example**](./llm/ollama/)
+- [Feb, 2024] Deploying and scaling **Gemma** with SkyServe: [**example**](./llm/gemma/)
+- [Feb, 2024] Serving **Code Llama 70B** with vLLM and SkyServe: [**example**](./llm/codellama/)
+- [Dec, 2023] **Mixtral 8x7B**, a high quality sparse mixture-of-experts model, was released by Mistral AI! Deploy via SkyPilot on any cloud: [**example**](./llm/mixtral/)
+- [Nov, 2023] Using **Axolotl** to finetune Mistral 7B on the cloud (on-demand and spot): [**example**](./llm/axolotl/)

 <details>
 <summary>Archived</summary>
-
+
 - [Apr, 2024] Serve and finetune [**Llama 3**](https://skypilot.readthedocs.io/en/latest/gallery/llms/llama-3.html) on any cloud or Kubernetes: [**example**](./llm/llama-3/)
 - [Mar, 2024] Serve and deploy [**Databricks DBRX**](https://www.databricks.com/blog/introducing-dbrx-new-state-art-open-llm) on your infra: [**example**](./llm/dbrx/)
 - [Feb, 2024] Speed up your LLM deployments with [**SGLang**](https://github.com/sgl-project/sglang) for 5x throughput on SkyServe: [**example**](./llm/sglang/)
 - [Dec, 2023] Using [**LoRAX**](https://github.com/predibase/lorax) to serve 1000s of finetuned LLMs on a single instance in the cloud: [**example**](./llm/lorax/)
 - [Sep, 2023] [**Mistral 7B**](https://mistral.ai/news/announcing-mistral-7b/), a high-quality open LLM, was released! Deploy via SkyPilot on any cloud: [**Mistral docs**](https://docs.mistral.ai/self-deployment/skypilot)
+- [Sep, 2023] Case study: [**Covariant**](https://covariant.ai/) transformed AI development on the cloud using SkyPilot, delivering models 4x faster cost-effectively: [**read the case study**](https://blog.skypilot.co/covariant/)
+- [Aug, 2023] **Finetuning Cookbook**: Finetuning Llama 2 in your own cloud environment, privately: [**example**](./llm/vicuna-llama-2/), [**blog post**](https://blog.skypilot.co/finetuning-llama2-operational-guide/)
 - [July, 2023] Self-Hosted **Llama-2 Chatbot** on Any Cloud: [**example**](./llm/llama-2/)
 - [June, 2023] Serving LLM 24x Faster On the Cloud [**with vLLM**](https://vllm.ai/) and SkyPilot: [**example**](./llm/vllm/), [**blog post**](https://blog.skypilot.co/serving-llm-24x-faster-on-the-cloud-with-vllm-and-skypilot/)
 - [April, 2023] [SkyPilot YAMLs](./llm/vicuna/) for finetuning & serving the [Vicuna LLM](https://lmsys.org/blog/2023-03-30-vicuna/) with a single command!
@@ -277,11 +277,12 @@ SkyPilot then performs the heavy-lifting for you, including:
 Refer to [Quickstart](https://skypilot.readthedocs.io/en/latest/getting-started/quickstart.html) to get started with SkyPilot.

 ## More Information
-To learn more, see our [
+To learn more, see our [documentation](https://skypilot.readthedocs.io/en/latest/), [blog](https://blog.skypilot.co/), and [community integrations](https://blog.skypilot.co/community/).

 <!-- Keep this section in sync with index.rst in SkyPilot Docs -->
 Runnable examples:
 - LLMs on SkyPilot
+  - [Llama 3.2: lightweight and vision models](./llm/llama-3_2/)
   - [Pixtral](./llm/pixtral/)
   - [Llama 3.1 finetuning](./llm/llama-3_1-finetuning/) and [serving](./llm/llama-3_1/)
   - [GPT-2 via `llm.c`](./llm/gpt-2/)
@@ -327,4 +328,4 @@ We are excited to hear your feedback!
 For general discussions, join us on the [SkyPilot Slack](http://slack.skypilot.co).

 ## Contributing
-We welcome
+We welcome all contributions to the project! See [CONTRIBUTING](CONTRIBUTING.md) for how to get involved.
{skypilot_nightly-1.0.0.dev20240926.dist-info → skypilot_nightly-1.0.0.dev20240928.dist-info}/RECORD
RENAMED
@@ -1,8 +1,8 @@
-sky/__init__.py,sha256=
+sky/__init__.py,sha256=8BEk3x0IPkFli8tjp7axkkM5mwQ1GuCABWwTMppkPcc,5854
 sky/admin_policy.py,sha256=hPo02f_A32gCqhUueF0QYy1fMSSKqRwYEg_9FxScN_s,3248
-sky/authentication.py,sha256=
+sky/authentication.py,sha256=o8ZhUf4VSN8WtjWcUUGYg-HVskaqaoMK4ZobHC-HVYU,20697
 sky/check.py,sha256=jLMIIJrseaZj1_o5WkbaD9XdyXIlCaT6pyAaIFdhdmA,9079
-sky/cli.py,sha256=
+sky/cli.py,sha256=9h4yO8p962960qUjvQ-xSusrtdh8TXNNQ1sfV0OqgZc,206262
 sky/cloud_stores.py,sha256=RjFgmRhUh1Kk__f6g3KxzLp9s7dA0pFK4W1AukEuUaw,21153
 sky/core.py,sha256=YF_6kwj8Ja171Oycb8L25SZ7V_ylZYovFS_jpnjwGo0,34408
 sky/dag.py,sha256=WLFWr5hfrwjd31uYlNvI-zWUk7tLaT_gzJn4LzbVtkE,2780
@@ -24,14 +24,14 @@ sky/adaptors/cudo.py,sha256=WGvIQrlzJkGDe02Ve7pygA56tHwUc4kwS3XHW8kMFAA,239
 sky/adaptors/docker.py,sha256=_kzpZ0fkWHqqQAVVl0llTsCE31KYz3Sjn8psTBQHVkA,468
 sky/adaptors/gcp.py,sha256=OQ9RaqjR0r0iaWYpjvEtIx5vnEhyB4LhUCwbtdxsmVk,3115
 sky/adaptors/ibm.py,sha256=H87vD6izq_wQI8oQC7cx9iVtRgPi_QkAcrfa1Z3PNqU,4906
-sky/adaptors/kubernetes.py,sha256=
+sky/adaptors/kubernetes.py,sha256=3DOZkz7NvinNLXJYpU1i8tULCK2vNPmsSF7DMWDdPKc,5508
 sky/adaptors/oci.py,sha256=n_zcrippTZRbTIhN3euD5sqNYn43G397zMavaJyEYbk,1480
 sky/adaptors/runpod.py,sha256=4Nt_BfZhJAKQNA3wO8cxvvNI8x4NsDGHu_4EhRDlGYQ,225
 sky/adaptors/vsphere.py,sha256=zJP9SeObEoLrpgHW2VHvZE48EhgVf8GfAEIwBeaDMfM,2129
 sky/backends/__init__.py,sha256=UDjwbUgpTRApbPJnNfR786GadUuwgRk3vsWoVu5RB_c,536
 sky/backends/backend.py,sha256=xtxR6boDv1o-uSCjbJhOMkKMnZvBZh3gExx4khFWPTI,5932
-sky/backends/backend_utils.py,sha256=
-sky/backends/cloud_vm_ray_backend.py,sha256=
+sky/backends/backend_utils.py,sha256=9Cjj2badyQbx-53F4EdsDW4yG-dk77DVnwBld2Ys9E8,126623
+sky/backends/cloud_vm_ray_backend.py,sha256=NtmHtteEdkI1P2GRdFbNLPsDpCEx5GshPLVn1wRNYhw,233984
 sky/backends/docker_utils.py,sha256=Hyw1YY20EyghhEbYx6O2FIMDcGkNzBzV9TM7LFynei8,8358
 sky/backends/local_docker_backend.py,sha256=H4GBo0KFUC_EEf-ziv1OUbfAkOI5BrwkYs9fYOxSoNw,16741
 sky/backends/wheel_utils.py,sha256=3QS4T_Ydvo4DbYhogtyADyNBEf04I6jUCL71M285shQ,7963
@@ -48,9 +48,9 @@ sky/clouds/cudo.py,sha256=H4VyMo5wWGAv2MXZ3xsbWjlZA_cZYnt4ecNlTOOao8Y,13147
 sky/clouds/fluidstack.py,sha256=iOmoOx52yTrHKMzwBDaxFJCfNo79M61d5tj-Np24Lyc,12436
 sky/clouds/gcp.py,sha256=CrSsaSXd83tM78foKH9viBfW1cQsjve3aUQbshsqvDg,54033
 sky/clouds/ibm.py,sha256=M8QdjeSFlwssfoY2aOodxG4q5R3eT9K-4lTPDHYvEYI,21476
-sky/clouds/kubernetes.py,sha256=
+sky/clouds/kubernetes.py,sha256=DyGkJusl5YMy_sIeogSKAcJ8XaUgxUx7Gc90dRi2bZU,27251
 sky/clouds/lambda_cloud.py,sha256=2Al3qCSl-I4iTi7pPPNXcbaLyVfCUgTl__vYBunLB6k,12439
-sky/clouds/oci.py,sha256=
+sky/clouds/oci.py,sha256=ozVEa-9IkfI-RxyXDs_aLG5G0toLBRdtwUtaU-y7bH4,26382
 sky/clouds/paperspace.py,sha256=lmUZPYAblaqiBmGQwCunccMiTF_dVA1o3vqY9Q_Nc28,10921
 sky/clouds/runpod.py,sha256=lstUC6f4JDhtcH9NfwkbpCJMmfmvMigoanhPXPbTYds,11540
 sky/clouds/scp.py,sha256=2KLTuNSMdBzK8CLwSesv7efOuiLidIMoyNG4AOt5Sqw,15870
@@ -65,7 +65,7 @@ sky/clouds/service_catalog/cudo_catalog.py,sha256=WPLIQ9NVvuKAS48KYujV6dQsyeppIc
 sky/clouds/service_catalog/fluidstack_catalog.py,sha256=c8MMTldG-q97MJ0zJymudQiOVQC_rxS7vqrZgLrgbQA,5038
 sky/clouds/service_catalog/gcp_catalog.py,sha256=MHWq_-jqm68oNpK1i8AlJIGBkSKT-P6xX7DkpvqvpHU,24323
 sky/clouds/service_catalog/ibm_catalog.py,sha256=0dzjmXABFECzaAuIa0E6pVINhVK6-G6U52Mj-L45gK8,4472
-sky/clouds/service_catalog/kubernetes_catalog.py,sha256=
+sky/clouds/service_catalog/kubernetes_catalog.py,sha256=6OocEUkgyJtBgHwzu4RPsvru6pj6RwGU-4uSFNQmsSM,8254
 sky/clouds/service_catalog/lambda_catalog.py,sha256=BAhUGqHj8aVe1zUhEQNO7bQUhcd9jAespGvPyQubTJY,5281
 sky/clouds/service_catalog/oci_catalog.py,sha256=tcV8_rsv_7_aTlcfTkq0XKdKRTFgwh8-rjyxVzPiYwQ,7744
 sky/clouds/service_catalog/paperspace_catalog.py,sha256=W8GgGlPbbWViELQ8EZfmIkxSbeQcCmMRUX4ecIIYDsk,3768
@@ -110,7 +110,7 @@ sky/provision/docker_utils.py,sha256=Z7vDUs9Yjqks_CsWrACcTgABIZuFi3EJVFwkU0WsdD0
 sky/provision/instance_setup.py,sha256=n1Px_KOYZl7Rf1WLXrfTTHyqxyA8_5QTN9BNLjQRkgc,22427
 sky/provision/logging.py,sha256=yZWgejrFBhhRjAtvFu5N5bRXIMK5TuwNjp1vKQqz2pw,2103
 sky/provision/metadata_utils.py,sha256=LrxeV4wD2QPzNdXV_npj8q-pr35FatxBBjF_jSbpOT0,4013
-sky/provision/provisioner.py,sha256=
+sky/provision/provisioner.py,sha256=atW6P0XgBL_qxChp0klwgj6j2XE7Q9_EevPrFSJcoJc,25240
 sky/provision/aws/__init__.py,sha256=mxq8PeWJqUtalDozTNpbtENErRZ1ktEs8uf2aG9UUgU,731
 sky/provision/aws/config.py,sha256=GfI6l5-ewhm2SS2XBnhVIFZCEueBkLgpvkf16xlSlG0,23017
 sky/provision/aws/instance.py,sha256=eCslJ2XfJo_pkQMnKFQqhGnUIRvwKiT12oxBY5-klss,40750
@@ -139,9 +139,9 @@ sky/provision/gcp/mig_utils.py,sha256=oFpcFZoapHMILSE4iIm8V5bxP1RhbMHRF7cciqq8qA
 sky/provision/kubernetes/__init__.py,sha256=y6yVfii81WYG3ROxv4hiIj-ydinS5-xGxLvXnARVQoI,719
 sky/provision/kubernetes/config.py,sha256=gC1FeW-cyeebphY6sq2BGVF8QKZujUKyH7qe9TAAoPM,29024
 sky/provision/kubernetes/instance.py,sha256=YdcZ2vhxJPXzT1D8FuCIUyjdkK6VjsG4_qm3dDbygGw,38204
-sky/provision/kubernetes/network.py,sha256=
-sky/provision/kubernetes/network_utils.py,sha256=
-sky/provision/kubernetes/utils.py,sha256=
+sky/provision/kubernetes/network.py,sha256=EpNjRQ131CXepqbdkoRKFu4szVrm0oKEpv1l8EgOkjU,12364
+sky/provision/kubernetes/network_utils.py,sha256=AZ8dkVyRHxdbJ8Lm_zXYc2y9a3O9PJWLL1PH8IjwAW8,11314
+sky/provision/kubernetes/utils.py,sha256=JO54OXmalzjXkiE6VgirTJoJ6s-3uBH1zt0rBqPz_Yk,82014
 sky/provision/kubernetes/manifests/smarter-device-manager-configmap.yaml,sha256=AMzYzlY0JIlfBWj5eX054Rc1XDW2thUcLSOGMJVhIdA,229
 sky/provision/kubernetes/manifests/smarter-device-manager-daemonset.yaml,sha256=RtTq4F1QUmR2Uunb6zuuRaPhV7hpesz4saHjn3Ncsb4,2010
 sky/provision/paperspace/__init__.py,sha256=1nbUPWio7UA5gCQkO_rfEDfgXT17u5OtuByxQx4Ez6g,598
@@ -227,7 +227,7 @@ sky/templates/jobs-controller.yaml.j2,sha256=Gu3ogFxFYr09VEXP-6zEbrCUOFo1aYxWEjA
 sky/templates/kubernetes-ingress.yml.j2,sha256=73iDklVDWBMbItg0IexCa6_ClXPJOxw7PWz3leku4nE,1340
 sky/templates/kubernetes-loadbalancer.yml.j2,sha256=IxrNYM366N01bbkJEbZ_UPYxUP8wyVEbRNFHRsBuLsw,626
 sky/templates/kubernetes-port-forward-proxy-command.sh,sha256=HlG7CPBBedCVBlL9qv0erW_eKm6Irj0LFyaAWuJW_lc,3148
-sky/templates/kubernetes-ray.yml.j2,sha256=
+sky/templates/kubernetes-ray.yml.j2,sha256=Wq9luXc6-t141uyHbtOy1IDmLMM0PBbePTZfZEtAKw0,18160
 sky/templates/kubernetes-ssh-jump.yml.j2,sha256=k5W5sOIMppU7dDkJMwPlqsUcb92y7L5_TVG3hkgMy8M,2747
 sky/templates/lambda-ray.yml.j2,sha256=UrYOUh4EliPlWcfQWPZzQSiIIYSoRloujV2xsZejYPM,5786
 sky/templates/local-ray.yml.j2,sha256=FNHeyHF6nW9nU9QLIZceUWfvrFTTcO51KqhTnYCEFaA,1185
@@ -244,7 +244,7 @@ sky/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sky/utils/accelerator_registry.py,sha256=BO4iYH5bV80Xyp4EPfO0n1D3LL0FvESCy7xm59Je3_o,3798
 sky/utils/admin_policy_utils.py,sha256=zFCu1OFIrZRfQNY0JFRO1502WFfdqZhwAU_QgM4fO9U,5943
 sky/utils/cluster_yaml_utils.py,sha256=1wRRYqI1kI-eFs1pMW4r_FFjHJ0zamq6v2RRI-Gtx5E,849
-sky/utils/command_runner.py,sha256=
+sky/utils/command_runner.py,sha256=4A7IbhyAiHHeYA85MulyRqldkQyDmc4BhRhjbfVlwE4,33850
 sky/utils/command_runner.pyi,sha256=1khh14BhdOpMxvk9Ydnd3OFdas5Nha6dSOzy5xLBUU4,7710
 sky/utils/common_utils.py,sha256=O6PlZTCNhbuXOzjuV2DKw43niWE_qPfYZNGhnMtZzQg,24028
 sky/utils/controller_utils.py,sha256=VtdjKH9u1kWwUOMzPUxuLpT-XXQ2gCLCLOldB-vdh_8,37483
@@ -252,10 +252,10 @@ sky/utils/dag_utils.py,sha256=gjGZiJj4_GYsraXX67e6ElvbmOByJcyjSfvVgYZiXvs,5588
 sky/utils/db_utils.py,sha256=AOvMmBEN9cF4I7CoXihPCtus4mU2VDGjBQSVMMgzKlA,2786
 sky/utils/env_options.py,sha256=1VXyd3bhiUgGfCpmmTqM9PagRo1ILBH4-pzIxmIeE6E,861
 sky/utils/kubernetes_enums.py,sha256=imGqHSa8O07zD_6xH1SDMM7dBU5lF5fzFFlQuQy00QM,1384
-sky/utils/log_utils.py,sha256=
+sky/utils/log_utils.py,sha256=yVu3etgKhiVYX8UG-JFPWZujxWBT4kwxZ5oAPIdjtGs,12054
 sky/utils/resources_utils.py,sha256=snByBxgx3Hnjfch2uysdAA3D-OAwrnuzTDHug36s5H4,6515
 sky/utils/rich_utils.py,sha256=5ZVhzlFx-nhqMXwv00eO9xC4rz7ibDlfD2lmGhZrJEY,1581
-sky/utils/schemas.py,sha256=
+sky/utils/schemas.py,sha256=QT0Fxri2o0SiWkky1DlZhA1dzQRQoB5OdVaej0wJvhc,28787
 sky/utils/subprocess_utils.py,sha256=zK0L3mvAkvKX1nFFI4IFEmRWX9ytpguhhxOTYUDKoDs,6507
 sky/utils/timeline.py,sha256=ao_nm0y52ZQILfL7Y92c3pSEFRyPm_ElORC3DrI5BwQ,3936
 sky/utils/ux_utils.py,sha256=318TRunQCyJpJXonfiJ1SVotNA-6K4F2XgMEYjvWvsk,3264
@@ -265,6 +265,7 @@ sky/utils/cli_utils/status_utils.py,sha256=9odkfXiXLMD14XJsqve6sGvHpe7ThHXpC6ic9
 sky/utils/kubernetes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sky/utils/kubernetes/create_cluster.sh,sha256=rv5Lz6AR00yBJMRyScfMSQiGKptMhtHWRsvyG20-u9c,7764
 sky/utils/kubernetes/delete_cluster.sh,sha256=BSccHF43GyepDNf-FZcenzHzpXXATkVD92vgn1lWPgk,927
+sky/utils/kubernetes/deploy_remote_cluster.sh,sha256=vGj0mD0tejHDRy8ulwKOvOF2mfLyT5J8fp7GVqEe_EY,8478
 sky/utils/kubernetes/generate_kind_config.py,sha256=_TNLnifA_r7-CRq083IP1xjelYqiLjzQX9ohuqYpDH8,3187
 sky/utils/kubernetes/generate_kubeconfig.sh,sha256=AcYhuuG5jXWGHUmyRuH-oKy5qcn92gXhu6bXOt6eD6g,9274
 sky/utils/kubernetes/gpu_labeler.py,sha256=MEUv0U4ACDcNwtFVltlv017XJMjxx1Bndf6fL0i6eqg,6960
@@ -272,9 +273,9 @@ sky/utils/kubernetes/k8s_gpu_labeler_job.yaml,sha256=KPqp23B-zQ2SZK03jdHeF9fLTog
 sky/utils/kubernetes/k8s_gpu_labeler_setup.yaml,sha256=VLKT2KKimZu1GDg_4AIlIt488oMQvhRZWwsj9vBbPUg,3812
 sky/utils/kubernetes/rsync_helper.sh,sha256=Ma-N9a271fTfdgP5-8XIQL7KPf8IPUo-uY004PCdUFo,747
 sky/utils/kubernetes/ssh_jump_lifecycle_manager.py,sha256=RFLJ3k7MR5UN4SKHykQ0lV9SgXumoULpKYIAt1vh-HU,6560
-skypilot_nightly-1.0.0.
-skypilot_nightly-1.0.0.
-skypilot_nightly-1.0.0.
-skypilot_nightly-1.0.0.
-skypilot_nightly-1.0.0.
-skypilot_nightly-1.0.0.
+skypilot_nightly-1.0.0.dev20240928.dist-info/LICENSE,sha256=emRJAvE7ngL6x0RhQvlns5wJzGI3NEQ_WMjNmd9TZc4,12170
+skypilot_nightly-1.0.0.dev20240928.dist-info/METADATA,sha256=AT9cnsY7Uj7BK0COu8mOXiCtfyCFrjtk7OBQvqx-_Nk,18948
+skypilot_nightly-1.0.0.dev20240928.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+skypilot_nightly-1.0.0.dev20240928.dist-info/entry_points.txt,sha256=StA6HYpuHj-Y61L2Ze-hK2IcLWgLZcML5gJu8cs6nU4,36
+skypilot_nightly-1.0.0.dev20240928.dist-info/top_level.txt,sha256=qA8QuiNNb6Y1OF-pCUtPEr6sLEwy2xJX06Bd_CrtrHY,4
+skypilot_nightly-1.0.0.dev20240928.dist-info/RECORD,,
{skypilot_nightly-1.0.0.dev20240926.dist-info → skypilot_nightly-1.0.0.dev20240928.dist-info}/LICENSE
RENAMED
File without changes
{skypilot_nightly-1.0.0.dev20240926.dist-info → skypilot_nightly-1.0.0.dev20240928.dist-info}/WHEEL
RENAMED
File without changes
{skypilot_nightly-1.0.0.dev20240926.dist-info → skypilot_nightly-1.0.0.dev20240928.dist-info}/entry_points.txt
RENAMED
File without changes
{skypilot_nightly-1.0.0.dev20240926.dist-info → skypilot_nightly-1.0.0.dev20240928.dist-info}/top_level.txt
RENAMED
File without changes