tetra-rp 0.13.0__py3-none-any.whl → 0.14.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.

This version of tetra-rp might be problematic.

@@ -1,58 +0,0 @@
-import asyncio
-from dotenv import load_dotenv
-from tetra_rp import remote, LiveServerless
-from utils import generate_report
-
-# Load environment variables from .env file
-load_dotenv()
-
-# Configuration for compute workload
-compute_config = LiveServerless(
-    name="advanced_compute",
-    workersMax=2,
-    cpu=2,
-    memory=4096,
-)
-
-
-@remote(compute_config)
-def analyze_data(data):
-    """Process and analyze data remotely."""
-    import pandas as pd
-
-    # Convert to DataFrame
-    df = pd.DataFrame(data)
-
-    # Perform analysis
-    result = {
-        "mean": df.mean().to_dict(),
-        "std": df.std().to_dict(),
-        "count": len(df),
-        "summary": df.describe().to_dict(),
-    }
-
-    return result
-
-
-async def main():
-    print("🚀 Running advanced Tetra example...")
-
-    # Sample data
-    sample_data = {
-        "values": [1, 2, 3, 4, 5, 10, 15, 20],
-        "categories": ["A", "B", "A", "C", "B", "A", "C", "B"],
-    }
-
-    # Process remotely
-    result = await analyze_data(sample_data)
-
-    # Generate report
-    report = generate_report(result)
-    print(report)
-
-
-if __name__ == "__main__":
-    try:
-        asyncio.run(main())
-    except Exception as e:
-        print(f"An error occurred: {e}")
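The removed advanced example wires @remote(compute_config) around a plain function so the call site simply awaits analyze_data(...). One compatibility caveat, since the sample data mixes numeric and string columns: on pandas 2.x, df.mean() and df.std() raise a TypeError when they hit non-numeric columns. A minimal sketch of the same aggregation that stays compatible, assuming the same sample data:

import pandas as pd

df = pd.DataFrame({
    "values": [1, 2, 3, 4, 5, 10, 15, 20],
    "categories": ["A", "B", "A", "C", "B", "A", "C", "B"],
})

# Restrict aggregations to numeric columns; pandas >= 2.0 raises a
# TypeError if mean()/std() are applied to string columns.
result = {
    "mean": df.mean(numeric_only=True).to_dict(),
    "std": df.std(numeric_only=True).to_dict(),
    "count": len(df),
    "summary": df.describe().to_dict(),
}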
@@ -1,24 +0,0 @@
-"""Utility functions for advanced example."""
-
-
-def process_data(data):
-    """Process raw data before analysis."""
-    # Add any preprocessing logic here
-    return data
-
-
-def generate_report(analysis_result):
-    """Generate a formatted report from analysis results."""
-    report = "\n=== Analysis Report ===\n"
-
-    if "mean" in analysis_result:
-        report += "\nMean values:\n"
-        for key, value in analysis_result["mean"].items():
-            report += f" {key}: {value:.2f}\n"
-
-    if "count" in analysis_result:
-        report += f"\nTotal records: {analysis_result['count']}\n"
-
-    report += "\n" + "=" * 25
-
-    return report
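For reference, a quick local run of the removed generate_report helper, with the expected output inferred from the string building above:

analysis = {"mean": {"values": 7.5}, "count": 8}
print(generate_report(analysis))
# === Analysis Report ===
#
# Mean values:
#  values: 7.50
#
# Total records: 8
#
# =========================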
@@ -1,32 +0,0 @@
-import asyncio
-from dotenv import load_dotenv
-from tetra_rp import remote, LiveServerless
-
-# Load environment variables from .env file
-load_dotenv()
-
-# Configuration for a simple resource
-config = LiveServerless(
-    name="basic_example",
-    workersMax=1,
-)
-
-
-@remote(config)
-def hello_world():
-    """Simple remote function example."""
-    print("Hello from the remote function!")
-    return "Hello, World!"
-
-
-async def main():
-    print("🚀 Running basic Tetra example...")
-    result = await hello_world()
-    print(f"Result: {result}")
-
-
-if __name__ == "__main__":
-    try:
-        asyncio.run(main())
-    except Exception as e:
-        print(f"An error occurred: {e}")
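A detail worth noting in this basic example: hello_world is defined with def, yet the caller must await it, because @remote wraps the function in an awaitable. For local testing without deploying anything, a no-op stand-in decorator can mimic that calling convention; the local_remote helper below is hypothetical and not part of tetra_rp:

import asyncio
from functools import wraps

def local_remote(_config=None):
    """Hypothetical stand-in for @remote: runs the function locally
    while keeping the awaitable call signature."""
    def decorator(fn):
        @wraps(fn)
        async def wrapper(*args, **kwargs):
            return fn(*args, **kwargs)
        return wrapper
    return decorator

@local_remote()
def hello_world():
    return "Hello, World!"

print(asyncio.run(hello_world()))  # -> Hello, World!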
@@ -1,64 +0,0 @@
-import asyncio
-from dotenv import load_dotenv
-from tetra_rp import remote, LiveServerless
-
-# Load environment variables from .env file
-load_dotenv()
-
-# Configuration for GPU workload
-gpu_config = LiveServerless(
-    name="gpu_compute",
-    workersMax=1,
-    gpu=1,
-    gpuType="A40",
-    cpu=4,
-    memory=8192,
-)
-
-
-@remote(gpu_config)
-def gpu_computation():
-    """GPU-accelerated computation example."""
-    try:
-        import torch
-
-        # Check GPU availability
-        if torch.cuda.is_available():
-            device = torch.cuda.get_device_name(0)
-            print(f"Using GPU: {device}")
-
-            # Simple GPU computation
-            x = torch.randn(1000, 1000).cuda()
-            y = torch.randn(1000, 1000).cuda()
-            result = torch.mm(x, y)
-
-            return {
-                "device": device,
-                "matrix_shape": result.shape,
-                "result_mean": result.mean().item(),
-                "computation": "Matrix multiplication completed on GPU",
-            }
-        else:
-            return {"error": "GPU not available"}
-
-    except ImportError:
-        return {"error": "PyTorch not available"}
-
-
-async def main():
-    print("🚀 Running GPU compute example...")
-    result = await gpu_computation()
-
-    if "error" in result:
-        print(f"{result['error']}")
-    else:
-        print("GPU computation completed!")
-        print(f"Device: {result['device']}")
-        print(f"Result: {result['computation']}")
-
-
-if __name__ == "__main__":
-    try:
-        asyncio.run(main())
-    except Exception as e:
-        print(f"An error occurred: {e}")
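One serialization caveat in this GPU example: result.shape is a torch.Size. Whether it survives the trip back to the caller depends on how tetra-rp serializes return values (pickling it, for instance, would require torch on the client side). Converting to plain Python types before returning is the conservative choice, roughly:

# Plain-Python equivalents of the tensor-derived fields;
# tuple and float serialize cleanly everywhere.
payload = {
    "matrix_shape": tuple(result.shape),   # torch.Size -> tuple of ints
    "result_mean": float(result.mean().item()),
}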
@@ -1,67 +0,0 @@
-"""FastAPI application with example endpoints."""
-
-from fastapi import FastAPI, HTTPException
-from pydantic import BaseModel
-
-
-def create_api_app() -> FastAPI:
-    """Create and configure FastAPI application."""
-
-    app = FastAPI(
-        title="Tetra API Service",
-        description="Example web API deployed with Tetra",
-        version="1.0.0",
-    )
-
-    # Example models
-    class ComputeRequest(BaseModel):
-        operation: str
-        values: list[float]
-
-    class ComputeResponse(BaseModel):
-        result: float
-        operation: str
-        input_count: int
-
-    @app.get("/")
-    async def root():
-        """Root endpoint."""
-        return {"message": "Tetra API Service", "status": "running"}
-
-    @app.get("/health")
-    async def health_check():
-        """Health check endpoint."""
-        return {"status": "healthy", "service": "tetra-rp-api"}
-
-    @app.post("/compute", response_model=ComputeResponse)
-    async def compute(request: ComputeRequest):
-        """Perform computation on provided values."""
-
-        if not request.values:
-            raise HTTPException(status_code=400, detail="No values provided")
-
-        try:
-            if request.operation == "sum":
-                result = sum(request.values)
-            elif request.operation == "mean":
-                result = sum(request.values) / len(request.values)
-            elif request.operation == "max":
-                result = max(request.values)
-            elif request.operation == "min":
-                result = min(request.values)
-            else:
-                raise HTTPException(
-                    status_code=400,
-                    detail=f"Unsupported operation: {request.operation}",
-                )
-
-            return ComputeResponse(
-                result=result,
-                operation=request.operation,
-                input_count=len(request.values),
-            )
-
-        except Exception as e:
-            raise HTTPException(status_code=500, detail=str(e))
-
-    return app
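A behavioral quirk in the removed /compute handler: fastapi.HTTPException subclasses Exception, so the blanket except at the bottom also catches the HTTPException raised for an unsupported operation and rewraps the intended 400 as a 500. Re-raising HTTPException first preserves the status code, roughly:

from fastapi import HTTPException

try:
    ...  # operation dispatch as above
except HTTPException:
    raise  # keep the original 400 instead of rewrapping it as a 500
except Exception as e:
    raise HTTPException(status_code=500, detail=str(e))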
@@ -1,42 +0,0 @@
-import asyncio
-from dotenv import load_dotenv
-from tetra_rp import remote, LiveServerless
-from api import create_api_app
-
-# Load environment variables from .env file
-load_dotenv()
-
-# Configuration for web API
-api_config = LiveServerless(
-    name="web_api_service",
-    workersMax=3,
-    cpu=2,
-    memory=2048,
-    ports=[8000],
-)
-
-
-@remote(api_config)
-def run_api_server():
-    """Run FastAPI web service."""
-    import uvicorn
-
-    app = create_api_app()
-
-    # Run the server
-    uvicorn.run(app, host="0.0.0.0", port=8000, log_level="info")
-
-    return "API server started"
-
-
-async def main():
-    print("🚀 Starting web API service...")
-    result = await run_api_server()
-    print(f"Result: {result}")
-
-
-if __name__ == "__main__":
-    try:
-        asyncio.run(main())
-    except Exception as e:
-        print(f"An error occurred: {e}")
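Note on the removed entrypoint: uvicorn.run() blocks until the server shuts down, so the return "API server started" line only executes after the server exits, and main() stays blocked in the meantime. If the caller needs control back while the server runs, one option is a background thread; this is a sketch, not necessarily how tetra-rp manages long-running services:

import threading
import uvicorn

def start_in_background(app, port=8000):
    """Run uvicorn in a daemon thread so the caller regains control."""
    server = uvicorn.Server(uvicorn.Config(app, host="0.0.0.0", port=port))
    thread = threading.Thread(target=server.run, daemon=True)
    thread.start()
    return server  # later: set server.should_exit = True to stop it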