tetra-rp 0.11.0__py3-none-any.whl → 0.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. tetra_rp/__init__.py +2 -0
  2. tetra_rp/cli/__init__.py +0 -0
  3. tetra_rp/cli/commands/__init__.py +1 -0
  4. tetra_rp/cli/commands/deploy.py +336 -0
  5. tetra_rp/cli/commands/init.py +86 -0
  6. tetra_rp/cli/commands/resource.py +191 -0
  7. tetra_rp/cli/commands/run.py +122 -0
  8. tetra_rp/cli/main.py +81 -0
  9. tetra_rp/cli/templates/advanced/main.py +58 -0
  10. tetra_rp/cli/templates/advanced/utils.py +24 -0
  11. tetra_rp/cli/templates/basic/main.py +32 -0
  12. tetra_rp/cli/templates/gpu-compute/main.py +64 -0
  13. tetra_rp/cli/templates/web-api/api.py +67 -0
  14. tetra_rp/cli/templates/web-api/main.py +42 -0
  15. tetra_rp/cli/utils/__init__.py +1 -0
  16. tetra_rp/cli/utils/deployment.py +172 -0
  17. tetra_rp/cli/utils/skeleton.py +101 -0
  18. tetra_rp/client.py +0 -6
  19. tetra_rp/config.py +29 -0
  20. tetra_rp/core/resources/__init__.py +3 -2
  21. tetra_rp/core/resources/cpu.py +115 -12
  22. tetra_rp/core/resources/gpu.py +29 -14
  23. tetra_rp/core/resources/live_serverless.py +40 -14
  24. tetra_rp/core/resources/resource_manager.py +63 -22
  25. tetra_rp/core/resources/serverless.py +27 -46
  26. tetra_rp/core/resources/serverless_cpu.py +154 -0
  27. tetra_rp/core/utils/file_lock.py +260 -0
  28. tetra_rp/core/utils/singleton.py +15 -1
  29. tetra_rp/execute_class.py +0 -3
  30. tetra_rp/protos/remote_execution.py +0 -4
  31. tetra_rp/stubs/live_serverless.py +11 -9
  32. tetra_rp/stubs/registry.py +25 -14
  33. {tetra_rp-0.11.0.dist-info → tetra_rp-0.13.0.dist-info}/METADATA +5 -1
  34. tetra_rp-0.13.0.dist-info/RECORD +56 -0
  35. tetra_rp-0.13.0.dist-info/entry_points.txt +2 -0
  36. tetra_rp-0.11.0.dist-info/RECORD +0 -36
  37. {tetra_rp-0.11.0.dist-info → tetra_rp-0.13.0.dist-info}/WHEEL +0 -0
  38. {tetra_rp-0.11.0.dist-info → tetra_rp-0.13.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,122 @@
1
+ """Execute main entry point command."""
2
+
3
+ import asyncio
4
+ import sys
5
+ from pathlib import Path
6
+ from typing import Optional
7
+ import typer
8
+ from rich.console import Console
9
+ from rich.progress import Progress, SpinnerColumn, TextColumn
10
+ from rich.panel import Panel
11
+
12
+ from tetra_rp.config import get_paths
13
+
14
# Shared Rich console used for all output produced by this command module.
console = Console()
15
+
16
+
17
def run_command(
    entry_point: Optional[str] = typer.Option(
        None, "--entry", "-e", help="Entry point file to execute"
    ),
    no_deploy: bool = typer.Option(
        False, "--no-deploy", help="Skip resource deployment"
    ),
):
    """Execute the main entry point of the app."""
    # Resolve the entry point: explicit --entry flag first, then auto-discovery.
    target = entry_point or discover_entry_point()
    if not target:
        console.print("No entry point found")
        console.print("Specify entry point with --entry or create main.py")
        raise typer.Exit(1)

    # The resolved path must exist before we attempt to execute it.
    target_path = Path(target)
    if not target_path.exists():
        console.print(f"Entry point not found: {target}")
        raise typer.Exit(1)

    console.print(f"🚀 Executing entry point: [bold]{target}[/bold]")

    # Drive the async executor; every failure mode maps to exit code 1.
    try:
        asyncio.run(execute_entry_point(target_path, no_deploy))
    except KeyboardInterrupt:
        console.print("\nExecution interrupted by user")
        raise typer.Exit(1)
    except Exception as e:
        console.print(f"Execution failed: {e}")
        raise typer.Exit(1)
52
+
53
+
54
def discover_entry_point() -> Optional[str]:
    """Discover the main entry point file.

    Checks conventional script names in the working directory first, then
    falls back to the ``entry_point`` key of the project config file.

    Returns:
        The entry point path as a string, or None when nothing is found.
    """
    # Check common entry point names in priority order.
    candidates = ["main.py", "app.py", "run.py", "__main__.py"]
    for candidate in candidates:
        if Path(candidate).exists():
            return candidate

    # Fall back to the entry point recorded in .tetra/config.json.
    paths = get_paths()
    config_path = paths.config_file
    if config_path.exists():
        import json

        try:
            with open(config_path) as f:
                config = json.load(f)
            # Missing key simply yields None — same as "not configured".
            return config.get("entry_point")
        except (json.JSONDecodeError, OSError):
            # Fix: also swallow OSError (e.g. permission denied) so a bad
            # config file degrades to "no entry point" instead of crashing.
            # The old KeyError clause was dead — dict.get never raises it.
            pass

    return None
77
+
78
+
79
async def execute_entry_point(entry_path: Path, no_deploy: bool = False):
    """Execute the entry point with progress tracking.

    Args:
        entry_path: Path of the Python file to load and run as a module.
        no_deploy: When True, skip the (mock) resource deployment phase.

    Raises:
        ImportError: If no import spec can be built for ``entry_path``.
        Exception: Anything raised by the executed module is propagated.
    """
    # Fix: use a plain import instead of the opaque
    # __import__("importlib.util").util indirection.
    import importlib.util

    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=console,
    ) as progress:
        if not no_deploy:
            # Deployment phase (mocked).
            deploy_task = progress.add_task("Preparing resources...", total=None)
            await asyncio.sleep(1)  # Mock deployment time
            progress.update(deploy_task, description="Resources ready")
            progress.stop_task(deploy_task)

        # Execution phase
        exec_task = progress.add_task("Executing...", total=None)

        parent_dir = str(entry_path.parent)
        path_added = False
        try:
            spec = importlib.util.spec_from_file_location(
                entry_path.stem, entry_path
            )
            # Fix: guard against a None spec/loader instead of an obscure
            # AttributeError further down.
            if spec is None or spec.loader is None:
                raise ImportError(f"Cannot load module from {entry_path}")
            module = importlib.util.module_from_spec(spec)

            # Add the script's directory to sys.path so its own imports work.
            sys.path.insert(0, parent_dir)
            path_added = True

            spec.loader.exec_module(module)

            progress.update(exec_task, description="Complete!")
            await asyncio.sleep(0.5)  # Brief pause to show completion

        except Exception as e:
            progress.update(exec_task, description=f"Failed: {e}")
            raise
        finally:
            # Fix: undo the sys.path mutation so repeated runs don't
            # accumulate entries or shadow unrelated modules.
            if path_added and parent_dir in sys.path:
                sys.path.remove(parent_dir)
            progress.stop_task(exec_task)

    # Success message (only reached when exec_module did not raise).
    console.print(
        Panel("Execution completed successfully", title="Success", expand=False)
    )
tetra_rp/cli/main.py ADDED
@@ -0,0 +1,81 @@
1
+ """Main CLI entry point for Flash CLI."""
2
+
3
+ import typer
4
+ from importlib import metadata
5
+ from rich.console import Console
6
+ from rich.panel import Panel
7
+
8
+ from .commands import (
9
+ init,
10
+ run,
11
+ resource,
12
+ deploy,
13
+ )
14
+
15
+
16
def get_version() -> str:
    """Return the installed tetra_rp version, or "unknown" if not installed."""
    try:
        version = metadata.version("tetra_rp")
    except metadata.PackageNotFoundError:
        # Running from a source checkout without an installed distribution.
        return "unknown"
    return version
22
+
23
+
24
# Shared Rich console for banner/version output.
console = Console()

# Root Typer application — exposed as the `flash` console script.
app = typer.Typer(
    name="flash",
    help="Flash CLI - Distributed inference and serving framework",
    no_args_is_help=True,
    rich_markup_mode="rich",
)

# Top-level commands: flash init / run / report / clean.
# Each command function lives in its own module under cli/commands.
app.command("init")(init.init_command)
app.command("run")(run.run_command)
app.command("report")(resource.report_command)
app.command("clean")(resource.clean_command)

# Sub-application grouping deployment management: flash deploy <subcommand>.
deploy_app = typer.Typer(
    name="deploy",
    help="Deployment environment management commands",
    no_args_is_help=True,
)

# Subcommands: flash deploy list / new / send / report / rollback / remove.
deploy_app.command("list")(deploy.list_command)
deploy_app.command("new")(deploy.new_command)
deploy_app.command("send")(deploy.send_command)
deploy_app.command("report")(deploy.report_command)
deploy_app.command("rollback")(deploy.rollback_command)
deploy_app.command("remove")(deploy.remove_command)

# Mount the deploy group on the root app.
app.add_typer(deploy_app, name="deploy")
56
+
57
+
58
@app.callback(invoke_without_command=True)
def main(
    ctx: typer.Context,
    version: bool = typer.Option(False, "--version", "-v", help="Show version"),
):
    """Flash CLI - Distributed inference and serving framework."""
    # --version short-circuits everything else.
    if version:
        console.print(f"Flash CLI v{get_version()}")
        raise typer.Exit()

    # A subcommand was given: let Typer dispatch to it.
    if ctx.invoked_subcommand is not None:
        return

    # Bare `flash` invocation: show a welcome banner instead of an error.
    console.print(
        Panel(
            "[bold blue]Flash CLI[/bold blue]\n\n"
            "A framework for distributed inference and serving of ML models.\n\n"
            "Use [bold]flash --help[/bold] to see available commands.",
            title="Welcome",
            expand=False,
        )
    )
78
+
79
+
80
# Allow direct execution (python -m / python main.py) in addition to the
# installed `flash` entry point.
if __name__ == "__main__":
    app()
@@ -0,0 +1,58 @@
1
+ import asyncio
2
+ from dotenv import load_dotenv
3
+ from tetra_rp import remote, LiveServerless
4
+ from utils import generate_report
5
+
6
# Load environment variables (e.g. credentials) from a local .env file.
load_dotenv()

# Remote resource configuration for the analysis workload: a live
# serverless pool with up to two workers.
compute_config = LiveServerless(
    name="advanced_compute",
    workersMax=2,
    cpu=2,
    memory=4096,
)
+ )
16
+
17
+
18
@remote(compute_config)
def analyze_data(data):
    """Run a basic statistical analysis of *data* on a remote worker.

    Returns a dict with per-column mean/std, the row count, and a full
    describe() summary.
    """
    # pandas is imported here so it resolves on the remote worker.
    import pandas as pd

    frame = pd.DataFrame(data)

    return {
        "mean": frame.mean().to_dict(),
        "std": frame.std().to_dict(),
        "count": len(frame),
        "summary": frame.describe().to_dict(),
    }
35
+
36
+
37
async def main():
    """Drive the advanced example: remote analysis plus a local report."""
    print("🚀 Running advanced Tetra example...")

    # Small in-memory dataset for the demonstration.
    sample_data = {
        "values": [1, 2, 3, 4, 5, 10, 15, 20],
        "categories": ["A", "B", "A", "C", "B", "A", "C", "B"],
    }

    # The remote call is awaited like any local coroutine.
    analysis = await analyze_data(sample_data)

    # Render and print the report locally.
    print(generate_report(analysis))
52
+
53
+
54
# Script entry point: run the async example and print a short message on
# failure instead of dumping a traceback.
if __name__ == "__main__":
    try:
        asyncio.run(main())
    except Exception as e:
        print(f"An error occurred: {e}")
@@ -0,0 +1,24 @@
1
+ """Utility functions for advanced example."""
2
+
3
+
4
def process_data(data):
    """Preprocess raw data before analysis.

    Currently a pass-through placeholder; extend with cleaning or
    normalization steps as the example grows.
    """
    return data
8
+
9
+
10
def generate_report(analysis_result):
    """Render *analysis_result* as a human-readable text report.

    Recognized keys: "mean" (mapping of column -> value) and "count" (int).
    Any other keys are ignored.
    """
    parts = ["\n=== Analysis Report ===\n"]

    if "mean" in analysis_result:
        parts.append("\nMean values:\n")
        parts.extend(
            f"  {key}: {value:.2f}\n"
            for key, value in analysis_result["mean"].items()
        )

    if "count" in analysis_result:
        parts.append(f"\nTotal records: {analysis_result['count']}\n")

    # Closing rule.
    parts.append("\n" + "=" * 25)

    return "".join(parts)
@@ -0,0 +1,32 @@
1
+ import asyncio
2
+ from dotenv import load_dotenv
3
+ from tetra_rp import remote, LiveServerless
4
+
5
# Load environment variables (e.g. credentials) from a local .env file.
load_dotenv()

# Minimal resource configuration: a single live serverless worker.
config = LiveServerless(
    name="basic_example",
    workersMax=1,
)
13
+
14
+
15
@remote(config)
def hello_world():
    """Smallest possible remote function: print and return a greeting."""
    # The print happens on the remote worker; the string is returned here.
    print("Hello from the remote function!")
    return "Hello, World!"
20
+
21
+
22
async def main():
    """Invoke the remote hello_world function and print its result."""
    print("🚀 Running basic Tetra example...")
    greeting = await hello_world()
    print(f"Result: {greeting}")
26
+
27
+
28
# Script entry point: run the async example and print a short message on
# failure instead of dumping a traceback.
if __name__ == "__main__":
    try:
        asyncio.run(main())
    except Exception as e:
        print(f"An error occurred: {e}")
@@ -0,0 +1,64 @@
1
+ import asyncio
2
+ from dotenv import load_dotenv
3
+ from tetra_rp import remote, LiveServerless
4
+
5
# Load environment variables (e.g. credentials) from a local .env file.
load_dotenv()

# Resource configuration requesting a single worker with one A40 GPU.
gpu_config = LiveServerless(
    name="gpu_compute",
    workersMax=1,
    gpu=1,
    gpuType="A40",
    cpu=4,
    memory=8192,
)
17
+
18
+
19
@remote(gpu_config)
def gpu_computation():
    """Run a small matrix multiplication on the worker's GPU.

    Returns a result dict on success, or a dict with an "error" key when
    PyTorch or a CUDA device is unavailable.
    """
    try:
        import torch
    except ImportError:
        return {"error": "PyTorch not available"}

    if not torch.cuda.is_available():
        return {"error": "GPU not available"}

    device = torch.cuda.get_device_name(0)
    print(f"Using GPU: {device}")

    # Multiply two random 1000x1000 matrices on the GPU.
    lhs = torch.randn(1000, 1000).cuda()
    rhs = torch.randn(1000, 1000).cuda()
    product = torch.mm(lhs, rhs)

    return {
        "device": device,
        "matrix_shape": product.shape,
        "result_mean": product.mean().item(),
        "computation": "Matrix multiplication completed on GPU",
    }
46
+
47
+
48
async def main():
    """Run the GPU example and print either the result or the error."""
    print("🚀 Running GPU compute example...")
    outcome = await gpu_computation()

    if "error" in outcome:
        print(f"{outcome['error']}")
        return

    print("GPU computation completed!")
    print(f"Device: {outcome['device']}")
    print(f"Result: {outcome['computation']}")
58
+
59
+
60
# Script entry point: run the async example and print a short message on
# failure instead of dumping a traceback.
if __name__ == "__main__":
    try:
        asyncio.run(main())
    except Exception as e:
        print(f"An error occurred: {e}")
@@ -0,0 +1,67 @@
1
+ """FastAPI application with example endpoints."""
2
+
3
+ from fastapi import FastAPI, HTTPException
4
+ from pydantic import BaseModel
5
+
6
+
7
def create_api_app() -> FastAPI:
    """Create and configure the example FastAPI application.

    Returns:
        A FastAPI app exposing root, health-check, and compute endpoints.
    """

    app = FastAPI(
        title="Tetra API Service",
        description="Example web API deployed with Tetra",
        version="1.0.0",
    )

    # Request/response models for the compute endpoint.
    class ComputeRequest(BaseModel):
        operation: str  # one of: "sum", "mean", "max", "min"
        values: list[float]

    class ComputeResponse(BaseModel):
        result: float
        operation: str
        input_count: int

    @app.get("/")
    async def root():
        """Root endpoint."""
        return {"message": "Tetra API Service", "status": "running"}

    @app.get("/health")
    async def health_check():
        """Health check endpoint."""
        return {"status": "healthy", "service": "tetra-rp-api"}

    @app.post("/compute", response_model=ComputeResponse)
    async def compute(request: ComputeRequest):
        """Perform computation on provided values."""

        if not request.values:
            raise HTTPException(status_code=400, detail="No values provided")

        try:
            if request.operation == "sum":
                result = sum(request.values)
            elif request.operation == "mean":
                result = sum(request.values) / len(request.values)
            elif request.operation == "max":
                result = max(request.values)
            elif request.operation == "min":
                result = min(request.values)
            else:
                raise HTTPException(
                    status_code=400,
                    detail=f"Unsupported operation: {request.operation}",
                )

            return ComputeResponse(
                result=result,
                operation=request.operation,
                input_count=len(request.values),
            )

        except HTTPException:
            # Bug fix: HTTPException subclasses Exception, so without this
            # re-raise the 400 for an unsupported operation was swallowed by
            # the handler below and reported to clients as a 500.
            raise
        except Exception as e:
            # Unexpected failure: surface as a 500 with the original cause.
            raise HTTPException(status_code=500, detail=str(e)) from e

    return app
@@ -0,0 +1,42 @@
1
+ import asyncio
2
+ from dotenv import load_dotenv
3
+ from tetra_rp import remote, LiveServerless
4
+ from api import create_api_app
5
+
6
# Load environment variables (e.g. credentials) from a local .env file.
load_dotenv()

# Resource configuration for the web API: up to three workers with
# port 8000 exposed for HTTP traffic.
api_config = LiveServerless(
    name="web_api_service",
    workersMax=3,
    cpu=2,
    memory=2048,
    ports=[8000],
)
17
+
18
+
19
@remote(api_config)
def run_api_server():
    """Run the FastAPI web service on the remote worker.

    Note: uvicorn.run() blocks for the lifetime of the server, so the
    return value is only produced after the server shuts down.
    """
    # uvicorn is imported here so it resolves on the remote worker.
    import uvicorn

    server_app = create_api_app()
    uvicorn.run(server_app, host="0.0.0.0", port=8000, log_level="info")

    return "API server started"
30
+
31
+
32
async def main():
    """Launch the remote API service and print its final result."""
    print("🚀 Starting web API service...")
    outcome = await run_api_server()
    print(f"Result: {outcome}")
36
+
37
+
38
# Script entry point: run the async example and print a short message on
# failure instead of dumping a traceback.
if __name__ == "__main__":
    try:
        asyncio.run(main())
    except Exception as e:
        print(f"An error occurred: {e}")
@@ -0,0 +1 @@
1
+ """CLI utility modules."""
@@ -0,0 +1,172 @@
1
+ """Deployment environment management utilities."""
2
+
3
+ import json
4
+ from typing import Dict, Any
5
+ from datetime import datetime
6
+
7
+ from tetra_rp.config import get_paths
8
+
9
+
10
def get_deployment_environments() -> Dict[str, Dict[str, Any]]:
    """Load all deployment environments from the deployments file.

    Returns an empty mapping when the file is absent, vanished between
    check and open, or contains invalid JSON.
    """
    deployments_file = get_paths().deployments_file

    if not deployments_file.exists():
        return {}

    try:
        with open(deployments_file) as fh:
            return json.load(fh)
    except (json.JSONDecodeError, FileNotFoundError):
        # Corrupt or concurrently-removed file degrades to "no environments".
        return {}
23
+
24
+
25
def save_deployment_environments(environments: Dict[str, Dict[str, Any]]):
    """Persist *environments* to the deployments file as indented JSON."""
    paths = get_paths()

    # Make sure the .tetra directory exists before writing into it.
    paths.ensure_tetra_dir()

    with open(paths.deployments_file, "w") as fh:
        json.dump(environments, fh, indent=2)
35
+
36
+
37
def create_deployment_environment(name: str, config: Dict[str, Any]):
    """Create (or overwrite) a deployment environment named *name*."""
    # Fresh environments start idle, with no version deployed yet.
    record = {
        "status": "idle",
        "config": config,
        "created_at": datetime.now().isoformat(),
        "current_version": None,
        "last_deployed": None,
        "url": None,
        "version_history": [],
    }

    environments = get_deployment_environments()
    environments[name] = record
    save_deployment_environments(environments)
53
+
54
+
55
def remove_deployment_environment(name: str):
    """Delete the environment *name* if it exists (no-op otherwise)."""
    environments = get_deployment_environments()

    # Only rewrite the file when something actually changed.
    if name in environments:
        environments.pop(name)
        save_deployment_environments(environments)
62
+
63
+
64
def deploy_to_environment(name: str) -> Dict[str, Any]:
    """Deploy current project to environment (mock implementation).

    Args:
        name: Name of an existing deployment environment.

    Returns:
        Dict with the new "version", public "url", and "status".

    Raises:
        ValueError: If the environment does not exist.
    """
    environments = get_deployment_environments()

    if name not in environments:
        raise ValueError(f"Environment {name} not found")

    env = environments[name]
    # Fix: tolerate records that predate version tracking instead of
    # raising KeyError on a missing "version_history".
    history = env.setdefault("version_history", [])

    # Mock deployment artifacts.
    version = f"v1.{len(history)}.0"
    url = f"https://{name.lower()}.example.com"
    # Fix: take one timestamp so the environment record and the history
    # entry agree exactly (previously datetime.now() was called twice).
    deployed_at = datetime.now().isoformat()

    # Update environment
    env.update(
        {
            "status": "active",
            "current_version": version,
            "last_deployed": deployed_at,
            "url": url,
            "uptime": "99.9%",
        }
    )

    # Only the newest entry is marked current.
    for entry in history:
        entry["is_current"] = False

    history.insert(
        0,
        {
            "version": version,
            "deployed_at": deployed_at,
            "description": "Deployment via CLI",
            "is_current": True,
        },
    )

    save_deployment_environments(environments)

    return {"version": version, "url": url, "status": "active"}
103
+
104
+
105
def rollback_deployment(name: str, target_version: str):
    """Rollback deployment to a previous version (mock implementation).

    Raises:
        ValueError: If the environment or the target version is unknown.
    """
    environments = get_deployment_environments()

    if name not in environments:
        raise ValueError(f"Environment {name} not found")

    env = environments[name]
    history = env["version_history"]

    # The target must already exist in the recorded version history.
    if not any(entry["version"] == target_version for entry in history):
        raise ValueError(f"Version {target_version} not found")

    env["current_version"] = target_version
    env["last_deployed"] = datetime.now().isoformat()

    # Re-flag the history so exactly the target version is current.
    for entry in history:
        entry["is_current"] = entry["version"] == target_version

    save_deployment_environments(environments)
131
+
132
+
133
def get_environment_info(name: str) -> Dict[str, Any]:
    """Get detailed information about an environment.

    Returns a copy of the stored record, augmented with mock runtime
    metrics for active environments and a normalized version history.

    Raises:
        ValueError: If the environment does not exist.
    """
    environments = get_deployment_environments()

    if name not in environments:
        raise ValueError(f"Environment {name} not found")

    env_info = environments[name].copy()

    # Active environments get mock runtime metrics attached.
    if env_info["status"] == "active":
        env_info.update(
            {
                "uptime": "99.9%",
                "requests_24h": 145234,
                "avg_response_time": "245ms",
                "error_rate": "0.02%",
                "cpu_usage": "45%",
                "memory_usage": "62%",
            }
        )

    # Normalize: a version history list is always present.
    env_info.setdefault("version_history", [])

    # Synthesize a one-entry history for deployed records that predate
    # version tracking.
    if not env_info["version_history"] and env_info["current_version"]:
        env_info["version_history"] = [
            {
                "version": env_info["current_version"],
                "deployed_at": env_info.get(
                    "last_deployed", datetime.now().isoformat()
                ),
                "description": "Initial deployment",
                "is_current": True,
            }
        ]

    return env_info