@computesdk/modal 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +587 -0
- package/dist/index.d.mts +24 -0
- package/dist/index.d.ts +24 -0
- package/dist/index.js +359 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +334 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +62 -0
package/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 computesdk

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

package/README.md
ADDED
@@ -0,0 +1,587 @@
# @computesdk/modal

Modal provider for ComputeSDK - Execute code in serverless Modal sandboxes with full Python support and GPU acceleration.

> **✅ Full Implementation:** This provider uses Modal's official JavaScript SDK (v0.3.16) with complete real API integration. All code execution, filesystem operations, and command execution are implemented using actual Modal Sandbox APIs.

## Installation

```bash
npm install @computesdk/modal
```

## Setup

1. Get your Modal API credentials from [modal.com](https://modal.com/)
2. Set the environment variables:

```bash
export MODAL_TOKEN_ID=your_token_id_here
export MODAL_TOKEN_SECRET=your_token_secret_here
```

## Usage

### With ComputeSDK

```typescript
import { compute } from 'computesdk';
import { modal } from '@computesdk/modal';

// Set as default provider
compute.setConfig({
  provider: modal({
    tokenId: process.env.MODAL_TOKEN_ID,
    tokenSecret: process.env.MODAL_TOKEN_SECRET
  })
});

// Create sandbox
const sandbox = await compute.sandbox.create({});

// Execute Python code with GPU acceleration
const result = await sandbox.runCode(`
import torch
import numpy as np

# Check if CUDA is available
print(f"CUDA available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"GPU device: {torch.cuda.get_device_name()}")

# Create tensor operations
x = torch.randn(1000, 1000)
if torch.cuda.is_available():
    x = x.cuda()

y = torch.matmul(x, x.T)
print(f"Result shape: {y.shape}")
print(f"Mean: {y.mean().item():.4f}")
`);

console.log(result.stdout);

// Clean up
await compute.sandbox.destroy(sandbox.sandboxId);
```

### Direct Usage

```typescript
import { compute } from 'computesdk';
import { modal } from '@computesdk/modal';

// Create provider
const provider = modal({
  tokenId: 'your_token_id',
  tokenSecret: 'your_token_secret',
  timeout: 600000 // 10 minutes
});

// Use with the compute singleton
const sandbox = await compute.sandbox.create({ provider });
```

## Configuration

### Environment Variables

```bash
export MODAL_TOKEN_ID=your_token_id_here
export MODAL_TOKEN_SECRET=your_token_secret_here
```

### Configuration Options

```typescript
interface ModalConfig {
  /** Modal API token ID - if not provided, will use MODAL_TOKEN_ID env var */
  tokenId?: string;
  /** Modal API token secret - if not provided, will use MODAL_TOKEN_SECRET env var */
  tokenSecret?: string;
  /** Default runtime environment */
  runtime?: 'python' | 'node';
  /** Execution timeout in milliseconds */
  timeout?: number;
  /** Modal environment (sandbox or main) */
  environment?: string;
}
```
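
All fields are optional. A fully specified provider might look like the sketch below; the values are illustrative, and any field you leave out falls back to the environment variables above.

```typescript
import { modal } from '@computesdk/modal';

// Every field is optional; unset credentials fall back to
// MODAL_TOKEN_ID / MODAL_TOKEN_SECRET from the environment.
const provider = modal({
  tokenId: process.env.MODAL_TOKEN_ID,
  tokenSecret: process.env.MODAL_TOKEN_SECRET,
  runtime: 'python',      // default runtime for runCode()
  timeout: 300000,        // execution timeout in milliseconds (5 minutes)
  environment: 'sandbox'  // Modal environment (sandbox or main)
});
```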

## Features

- ✅ **Code Execution** - Real Python code execution using Modal Sandbox.exec()
- ✅ **Command Execution** - Real shell command execution in Modal containers
- ✅ **Filesystem Operations** - Real file system access via Modal open() and exec() APIs
- ✅ **Serverless Scaling** - Automatic scaling to thousands of containers
- ✅ **GPU Support** - Easy GPU access with Modal's native GPU support
- ✅ **Full Modal Integration** - Complete real implementation using the Modal JavaScript SDK

## Implementation Status

This provider uses Modal's **official JavaScript SDK** (v0.3.16) with **complete real API integration**:

- ✅ **Real Modal SDK Integration** - Uses the official `modal` npm package
- ✅ **Authentication** - Full Modal API token handling with initializeClient()
- ✅ **Sandbox Management** - Real create, connect, and destroy of Modal sandboxes
- ✅ **Code Execution** - Real Python execution using Modal Sandbox.exec()
- ✅ **Filesystem Operations** - Real file operations using the Modal open() API with fallbacks
- ✅ **Command Execution** - Real shell command execution in Modal containers
- ✅ **Status Monitoring** - Real sandbox status using the Modal poll() API
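
Conceptually, the provider maps ComputeSDK's `runCode()` onto `Sandbox.exec()` from the list above. The sketch below only illustrates that mapping; the interface is a stand-in rather than the actual Modal SDK surface, so treat the method signature as an assumption and consult the Modal SDK docs for the real one.

```typescript
// Stand-in for the Modal sandbox handle; the method name is taken from the
// feature list above, and the exact SDK signature may differ.
interface ModalSandboxLike {
  exec(command: string[]): Promise<{ stdout: string; stderr: string; exitCode: number }>;
}

// Python source is handed to the interpreter inside the container via exec().
async function runPythonCode(sandbox: ModalSandboxLike, code: string) {
  return sandbox.exec(['python', '-c', code]);
}
```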

### Current Status

- **Package**: Uses `modal@0.3.16` from npm
- **Authentication**: Fully implemented with MODAL_TOKEN_ID/MODAL_TOKEN_SECRET
- **Core Structure**: Complete ComputeSDK provider interface
- **Execution**: **Real Modal API calls** for all operations
- **Filesystem**: Dual approach using the Modal file API with command fallbacks (see the sketch after this list)
- **Error Handling**: Comprehensive error handling with Modal-specific errors
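
The dual filesystem approach amounts to trying the file API first and falling back to a shell command. The sketch below expresses that idea in terms of the sandbox API documented later in this README; whether the provider's internals look exactly like this is an assumption.

```typescript
// Prefer the Modal file API; fall back to a plain shell command if it fails.
// `sandbox` is a ComputeSDK sandbox as returned by compute.sandbox.create().
async function readFileWithFallback(sandbox: any, path: string): Promise<string> {
  try {
    return await sandbox.filesystem.readFile(path);
  } catch {
    const result = await sandbox.runCommand('cat', [path]);
    return result.stdout;
  }
}
```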

### Production Ready

This provider is **production ready** with real Modal API integration:

1. ✅ Real code execution via Modal Sandbox.exec()
2. ✅ Real filesystem operations via Modal open() + command fallbacks
3. ✅ Real command execution in Modal containers
4. ✅ Real sandbox lifecycle management
5. ✅ Comprehensive error handling and stream management

## API Reference

### Code Execution

```typescript
// Execute Python code with real Modal Sandbox.exec()
const result = await sandbox.runCode(`
import torch
import numpy as np

# Check GPU availability
print(f"CUDA available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"GPU: {torch.cuda.get_device_name()}")

# Create tensor operations
x = torch.randn(1000, 1000).cuda() if torch.cuda.is_available() else torch.randn(1000, 1000)
y = torch.matmul(x, x.T)
print(f"Result shape: {y.shape}")
`, 'python');

console.log(result.stdout);   // Real output from Modal sandbox
console.log(result.stderr);   // Real errors if any
console.log(result.exitCode); // Real exit code

// Auto-detection (defaults to Python for Modal)
const autoResult = await sandbox.runCode('print("Hello from real Modal sandbox!")');
```

### Command Execution

```typescript
// List files using real Modal exec()
const lsResult = await sandbox.runCommand('ls', ['-la']);
console.log(lsResult.stdout); // Real directory listing

// Install packages in the real Modal container
const pipResult = await sandbox.runCommand('pip', ['install', 'transformers', 'torch']);
console.log(pipResult.stdout); // Real pip installation output

// Run an ML training script in Modal
const trainResult = await sandbox.runCommand('python', ['train.py', '--epochs', '10']);
console.log(trainResult.stdout); // Real training output

// System commands with real GPU info
const gpuResult = await sandbox.runCommand('nvidia-smi');
console.log(gpuResult.stdout); // Real GPU information from Modal
```

### Filesystem Operations

```typescript
// Write files using the real Modal file API
await sandbox.filesystem.writeFile('/app/train.py', `
import torch
import torch.nn as nn

class SimpleModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(10, 1)

    def forward(self, x):
        return self.linear(x)

model = SimpleModel()
print("Model created successfully!")
`);

// Read real file content from the Modal sandbox
const content = await sandbox.filesystem.readFile('/app/train.py');
console.log(content); // Actual file content from Modal

// Create directories in the real Modal filesystem
await sandbox.filesystem.mkdir('/app/data');
await sandbox.filesystem.mkdir('/app/models');

// List real directory contents from Modal
const files = await sandbox.filesystem.readdir('/app');
console.log(files); // Real file listing with metadata

// Check real file existence in Modal
const exists = await sandbox.filesystem.exists('/app/train.py');
console.log('File exists:', exists); // true if the file actually exists

// Remove files from the real Modal filesystem
await sandbox.filesystem.remove('/app/temp_file.txt');
```

### Sandbox Management

```typescript
// Get sandbox info
const info = await sandbox.getInfo();
console.log(info.id, info.provider, info.status);

// List all sandboxes (Modal Apps)
const sandboxes = await compute.sandbox.list(provider);

// Get an existing sandbox by ID
const existing = await compute.sandbox.getById(provider, 'app-id');

// Destroy sandbox
await compute.sandbox.destroy(provider, 'app-id');
```

## Modal-Specific Features

### GPU Acceleration

```typescript
// Modal automatically handles GPU allocation
const result = await sandbox.runCode(`
import torch
print(f"CUDA available: {torch.cuda.is_available()}")

# Use GPU if available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
`);
```

### Serverless Scaling

```typescript
// Modal automatically scales based on demand
// No configuration needed - just execute code
const results = await Promise.all([
  sandbox.runCode(task1),
  sandbox.runCode(task2),
  sandbox.runCode(task3)
]);
```

### Container Images

```typescript
// Modal supports custom container images
const provider = modal({
  tokenId: process.env.MODAL_TOKEN_ID,
  tokenSecret: process.env.MODAL_TOKEN_SECRET,
  // Custom image configuration would be specified here
});
```

## Error Handling

```typescript
try {
  const result = await sandbox.runCode('invalid python code');
} catch (error) {
  if (error.message.includes('Missing Modal API credentials')) {
    console.error('Set MODAL_TOKEN_ID and MODAL_TOKEN_SECRET environment variables');
  } else if (error.message.includes('authentication failed')) {
    console.error('Check your Modal API credentials');
  } else if (error.message.includes('quota exceeded')) {
    console.error('Modal usage limits reached');
  } else if (error.message.includes('Syntax error')) {
    console.error('Code has syntax errors');
  }
}
```

## Web Framework Integration

Use with web frameworks via the request handler:

```typescript
import { handleComputeRequest } from 'computesdk';
import { modal } from '@computesdk/modal';

export async function POST(request: Request) {
  return handleComputeRequest({
    request,
    provider: modal({
      tokenId: process.env.MODAL_TOKEN_ID,
      tokenSecret: process.env.MODAL_TOKEN_SECRET
    })
  });
}
```

## Examples

### Machine Learning Pipeline

```typescript
const sandbox = await compute.sandbox.create({});

// Create ML project structure
await sandbox.filesystem.mkdir('/ml-project');
await sandbox.filesystem.mkdir('/ml-project/data');
await sandbox.filesystem.mkdir('/ml-project/models');

// Write training script
const trainScript = `
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import DataLoader, TensorDataset

# Generate sample data
X = torch.randn(1000, 10)
y = torch.randn(1000, 1)

# Create dataset
dataset = TensorDataset(X, y)
dataloader = DataLoader(dataset, batch_size=32)

# Define model
class SimpleModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(10, 1)

    def forward(self, x):
        return self.linear(x)

# Train model
model = SimpleModel()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters())

for epoch in range(10):
    for batch_x, batch_y in dataloader:
        optimizer.zero_grad()
        outputs = model(batch_x)
        loss = criterion(outputs, batch_y)
        loss.backward()
        optimizer.step()

    print(f"Epoch {epoch+1}, Loss: {loss.item():.4f}")

# Save model
torch.save(model.state_dict(), '/ml-project/models/model.pt')
print("Model saved!")
`;

await sandbox.filesystem.writeFile('/ml-project/train.py', trainScript);

// Run training
const result = await sandbox.runCode(`
import subprocess
result = subprocess.run(['python', '/ml-project/train.py'],
                        capture_output=True, text=True)
print(result.stdout)
if result.stderr:
    print("Errors:", result.stderr)
`);

console.log(result.stdout);

// Verify model was saved
const modelExists = await sandbox.filesystem.exists('/ml-project/models/model.pt');
console.log('Model saved:', modelExists);
```

### GPU-Accelerated Inference

```typescript
const sandbox = await compute.sandbox.create({});

// GPU inference example
const result = await sandbox.runCode(`
import torch
import torch.nn as nn
import time

# Check GPU availability
print(f"CUDA available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"GPU: {torch.cuda.get_device_name()}")
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

# Create large model for inference
class LargeModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(1000, 2000),
            nn.ReLU(),
            nn.Linear(2000, 1000),
            nn.ReLU(),
            nn.Linear(1000, 100)
        )

    def forward(self, x):
        return self.layers(x)

# Initialize model and move to GPU
model = LargeModel().to(device)
model.eval()

# Create test data
batch_size = 64
input_data = torch.randn(batch_size, 1000).to(device)

# Run inference
start_time = time.time()
with torch.no_grad():
    outputs = model(input_data)

inference_time = time.time() - start_time
print(f"Inference completed in {inference_time:.4f} seconds")
print(f"Output shape: {outputs.shape}")
print(f"Device: {outputs.device}")
`);

console.log(result.stdout);
```

### Distributed Processing

```typescript
// Process multiple tasks in parallel
const tasks = [
  'task1_data.json',
  'task2_data.json',
  'task3_data.json'
];

const results = await Promise.all(
  tasks.map(async (taskFile) => {
    const sandbox = await compute.sandbox.create({});

    return await sandbox.runCode(`
import json
import numpy as np

# Load task data
with open('/data/${taskFile}', 'r') as f:
    data = json.load(f)

# Process data (example: statistical analysis)
values = np.array(data['values'])
results = {
    'task': '${taskFile}',
    'mean': float(values.mean()),
    'std': float(values.std()),
    'min': float(values.min()),
    'max': float(values.max()),
    'count': len(values)
}

print(json.dumps(results))
`);
  })
);

results.forEach(result => {
  const taskResult = JSON.parse(result.stdout);
  console.log(`Task ${taskResult.task}: mean=${taskResult.mean.toFixed(2)}`);
});
```

## Best Practices

1. **Resource Management**: Modal automatically manages resources, but destroy sandboxes when you are done (see the sketch after this list)
2. **Error Handling**: Use try-catch blocks for robust error handling
3. **GPU Utilization**: Modal provides easy GPU access - leverage it for ML workloads
4. **Parallel Processing**: Use Modal's natural scaling for parallel tasks
5. **Container Images**: Use Modal's pre-built ML images for faster startup
6. **API Security**: Never commit API credentials to version control
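
A minimal pattern that combines points 1 and 2: wrap the work in try/catch and destroy the sandbox in a finally block so it is released even when execution fails. The calls follow the usage examples earlier in this README.

```typescript
const sandbox = await compute.sandbox.create({});
try {
  const result = await sandbox.runCode('print("hello from Modal")');
  console.log(result.stdout);
} catch (error) {
  // Surface provider errors (credentials, quota, syntax, ...) to the caller.
  console.error('Execution failed:', error);
} finally {
  // Always release the sandbox, even if execution threw.
  await compute.sandbox.destroy(sandbox.sandboxId);
}
```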

## Limitations

- **JavaScript SDK**: Relies on the official Modal JavaScript SDK (v0.3.16), which is considerably newer than Modal's Python SDK
- **Runtime Support**: Primarily Python-focused (Modal's strength)
- **Network Access**: Subject to Modal's networking policies
- **Billing**: Pay-per-use Modal pricing applies

## Modal vs Other Providers

| Feature | Modal | E2B | Vercel | Daytona |
|---------|-------|-----|--------|---------|
| **Primary Runtime** | Python | Python/Node | Node/Python | Any |
| **GPU Support** | ✅ Easy | ❌ | ❌ | ❌ |
| **Auto Scaling** | ✅ Thousands | ❌ | ✅ | ❌ |
| **ML/AI Focus** | ✅ Optimized | ✅ | ❌ | ❌ |
| **Pricing Model** | Pay-per-use | Per sandbox | Per execution | Per workspace |
| **Filesystem** | ✅ | ✅ | Limited | ✅ |

## Alternative Integration Approaches

The approaches below predate the full JavaScript SDK integration described above and are kept for reference only; they are not required to use this provider.

### Python Bridge Service

```python
# example_bridge.py
from modal import App, Image, web_endpoint
import json

app = App("computesdk-bridge")

@app.function(
    image=Image.debian_slim().pip_install(["flask", "requests"])
)
@web_endpoint(method="POST")
def execute_code(request_data: dict):
    """Bridge endpoint for executing code via Modal"""
    code = request_data.get('code')
    runtime = request_data.get('runtime', 'python')

    # Execute code in a Modal sandbox
    # Return results in ComputeSDK format
    pass
```

### REST API Client

```typescript
// Hypothetical REST API implementation
class ModalRestClient {
  async createSandbox(config: ModalConfig) {
    // POST to Modal API
  }

  async executeCode(sandboxId: string, code: string) {
    // POST to Modal API
  }

  async destroySandbox(sandboxId: string) {
    // DELETE from Modal API
  }
}
```

## Support

- [Modal Documentation](https://modal.com/docs)
- [ComputeSDK Issues](https://github.com/computesdk/computesdk/issues)
- [Modal Community](https://modal.com/slack)

## License

MIT

package/dist/index.d.mts
ADDED
@@ -0,0 +1,24 @@
import * as computesdk from 'computesdk';
import { Runtime } from 'computesdk';

/**
 * Modal-specific configuration options
 */
interface ModalConfig {
    /** Modal API token ID - if not provided, will fallback to MODAL_TOKEN_ID environment variable */
    tokenId?: string;
    /** Modal API token secret - if not provided, will fallback to MODAL_TOKEN_SECRET environment variable */
    tokenSecret?: string;
    /** Default runtime environment */
    runtime?: Runtime;
    /** Execution timeout in milliseconds */
    timeout?: number;
    /** Modal environment (sandbox or main) */
    environment?: string;
}
/**
 * Create a Modal provider instance using the factory pattern
 */
declare const modal: (config: ModalConfig) => computesdk.Provider;

export { type ModalConfig, modal };

package/dist/index.d.ts
ADDED
@@ -0,0 +1,24 @@
import * as computesdk from 'computesdk';
import { Runtime } from 'computesdk';

/**
 * Modal-specific configuration options
 */
interface ModalConfig {
    /** Modal API token ID - if not provided, will fallback to MODAL_TOKEN_ID environment variable */
    tokenId?: string;
    /** Modal API token secret - if not provided, will fallback to MODAL_TOKEN_SECRET environment variable */
    tokenSecret?: string;
    /** Default runtime environment */
    runtime?: Runtime;
    /** Execution timeout in milliseconds */
    timeout?: number;
    /** Modal environment (sandbox or main) */
    environment?: string;
}
/**
 * Create a Modal provider instance using the factory pattern
 */
declare const modal: (config: ModalConfig) => computesdk.Provider;

export { type ModalConfig, modal };