isa-model 0.3.9-py3-none-any.whl → 0.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124)
  1. isa_model/__init__.py +1 -1
  2. isa_model/client.py +732 -565
  3. isa_model/core/cache/redis_cache.py +401 -0
  4. isa_model/core/config/config_manager.py +53 -10
  5. isa_model/core/config.py +1 -1
  6. isa_model/core/database/__init__.py +1 -0
  7. isa_model/core/database/migrations.py +277 -0
  8. isa_model/core/database/supabase_client.py +123 -0
  9. isa_model/core/models/__init__.py +37 -0
  10. isa_model/core/models/model_billing_tracker.py +60 -88
  11. isa_model/core/models/model_manager.py +36 -18
  12. isa_model/core/models/model_repo.py +44 -38
  13. isa_model/core/models/model_statistics_tracker.py +234 -0
  14. isa_model/core/models/model_storage.py +0 -1
  15. isa_model/core/models/model_version_manager.py +959 -0
  16. isa_model/core/pricing_manager.py +2 -249
  17. isa_model/core/resilience/circuit_breaker.py +366 -0
  18. isa_model/core/security/secrets.py +358 -0
  19. isa_model/core/services/__init__.py +2 -4
  20. isa_model/core/services/intelligent_model_selector.py +101 -370
  21. isa_model/core/storage/hf_storage.py +1 -1
  22. isa_model/core/types.py +7 -0
  23. isa_model/deployment/cloud/modal/isa_audio_chatTTS_service.py +520 -0
  24. isa_model/deployment/cloud/modal/isa_audio_fish_service.py +0 -0
  25. isa_model/deployment/cloud/modal/isa_audio_openvoice_service.py +758 -0
  26. isa_model/deployment/cloud/modal/isa_audio_service_v2.py +1044 -0
  27. isa_model/deployment/cloud/modal/isa_embed_rerank_service.py +296 -0
  28. isa_model/deployment/cloud/modal/isa_video_hunyuan_service.py +423 -0
  29. isa_model/deployment/cloud/modal/isa_vision_ocr_service.py +519 -0
  30. isa_model/deployment/cloud/modal/isa_vision_qwen25_service.py +709 -0
  31. isa_model/deployment/cloud/modal/isa_vision_table_service.py +467 -323
  32. isa_model/deployment/cloud/modal/isa_vision_ui_service.py +607 -180
  33. isa_model/deployment/cloud/modal/isa_vision_ui_service_optimized.py +660 -0
  34. isa_model/deployment/core/deployment_manager.py +6 -4
  35. isa_model/deployment/services/auto_hf_modal_deployer.py +894 -0
  36. isa_model/eval/benchmarks/__init__.py +27 -0
  37. isa_model/eval/benchmarks/multimodal_datasets.py +460 -0
  38. isa_model/eval/benchmarks.py +244 -12
  39. isa_model/eval/evaluators/__init__.py +8 -2
  40. isa_model/eval/evaluators/audio_evaluator.py +727 -0
  41. isa_model/eval/evaluators/embedding_evaluator.py +742 -0
  42. isa_model/eval/evaluators/vision_evaluator.py +564 -0
  43. isa_model/eval/example_evaluation.py +395 -0
  44. isa_model/eval/factory.py +272 -5
  45. isa_model/eval/isa_benchmarks.py +700 -0
  46. isa_model/eval/isa_integration.py +582 -0
  47. isa_model/eval/metrics.py +159 -6
  48. isa_model/eval/tests/unit/test_basic.py +396 -0
  49. isa_model/inference/ai_factory.py +44 -8
  50. isa_model/inference/services/audio/__init__.py +21 -0
  51. isa_model/inference/services/audio/base_realtime_service.py +225 -0
  52. isa_model/inference/services/audio/isa_tts_service.py +0 -0
  53. isa_model/inference/services/audio/openai_realtime_service.py +320 -124
  54. isa_model/inference/services/audio/openai_stt_service.py +32 -6
  55. isa_model/inference/services/base_service.py +17 -1
  56. isa_model/inference/services/embedding/__init__.py +13 -0
  57. isa_model/inference/services/embedding/base_embed_service.py +111 -8
  58. isa_model/inference/services/embedding/isa_embed_service.py +305 -0
  59. isa_model/inference/services/embedding/openai_embed_service.py +2 -4
  60. isa_model/inference/services/embedding/tests/test_embedding.py +222 -0
  61. isa_model/inference/services/img/__init__.py +2 -2
  62. isa_model/inference/services/img/base_image_gen_service.py +24 -7
  63. isa_model/inference/services/img/replicate_image_gen_service.py +84 -422
  64. isa_model/inference/services/img/services/replicate_face_swap.py +193 -0
  65. isa_model/inference/services/img/services/replicate_flux.py +226 -0
  66. isa_model/inference/services/img/services/replicate_flux_kontext.py +219 -0
  67. isa_model/inference/services/img/services/replicate_sticker_maker.py +249 -0
  68. isa_model/inference/services/img/tests/test_img_client.py +297 -0
  69. isa_model/inference/services/llm/base_llm_service.py +30 -6
  70. isa_model/inference/services/llm/helpers/llm_adapter.py +63 -9
  71. isa_model/inference/services/llm/ollama_llm_service.py +2 -1
  72. isa_model/inference/services/llm/openai_llm_service.py +652 -55
  73. isa_model/inference/services/llm/yyds_llm_service.py +2 -1
  74. isa_model/inference/services/vision/__init__.py +5 -5
  75. isa_model/inference/services/vision/base_vision_service.py +118 -185
  76. isa_model/inference/services/vision/helpers/image_utils.py +11 -5
  77. isa_model/inference/services/vision/isa_vision_service.py +573 -0
  78. isa_model/inference/services/vision/tests/test_ocr_client.py +284 -0
  79. isa_model/serving/api/fastapi_server.py +88 -16
  80. isa_model/serving/api/middleware/auth.py +311 -0
  81. isa_model/serving/api/middleware/security.py +278 -0
  82. isa_model/serving/api/routes/analytics.py +486 -0
  83. isa_model/serving/api/routes/deployments.py +339 -0
  84. isa_model/serving/api/routes/evaluations.py +579 -0
  85. isa_model/serving/api/routes/logs.py +430 -0
  86. isa_model/serving/api/routes/settings.py +582 -0
  87. isa_model/serving/api/routes/unified.py +324 -165
  88. isa_model/serving/api/startup.py +304 -0
  89. isa_model/serving/modal_proxy_server.py +249 -0
  90. isa_model/training/__init__.py +100 -6
  91. isa_model/training/core/__init__.py +4 -1
  92. isa_model/training/examples/intelligent_training_example.py +281 -0
  93. isa_model/training/intelligent/__init__.py +25 -0
  94. isa_model/training/intelligent/decision_engine.py +643 -0
  95. isa_model/training/intelligent/intelligent_factory.py +888 -0
  96. isa_model/training/intelligent/knowledge_base.py +751 -0
  97. isa_model/training/intelligent/resource_optimizer.py +839 -0
  98. isa_model/training/intelligent/task_classifier.py +576 -0
  99. isa_model/training/storage/__init__.py +24 -0
  100. isa_model/training/storage/core_integration.py +439 -0
  101. isa_model/training/storage/training_repository.py +552 -0
  102. isa_model/training/storage/training_storage.py +628 -0
  103. {isa_model-0.3.9.dist-info → isa_model-0.4.0.dist-info}/METADATA +13 -1
  104. isa_model-0.4.0.dist-info/RECORD +182 -0
  105. isa_model/deployment/cloud/modal/isa_vision_doc_service.py +0 -766
  106. isa_model/deployment/cloud/modal/register_models.py +0 -321
  107. isa_model/inference/adapter/unified_api.py +0 -248
  108. isa_model/inference/services/helpers/stacked_config.py +0 -148
  109. isa_model/inference/services/img/flux_professional_service.py +0 -603
  110. isa_model/inference/services/img/helpers/base_stacked_service.py +0 -274
  111. isa_model/inference/services/others/table_transformer_service.py +0 -61
  112. isa_model/inference/services/vision/doc_analysis_service.py +0 -640
  113. isa_model/inference/services/vision/helpers/base_stacked_service.py +0 -274
  114. isa_model/inference/services/vision/ui_analysis_service.py +0 -823
  115. isa_model/scripts/inference_tracker.py +0 -283
  116. isa_model/scripts/mlflow_manager.py +0 -379
  117. isa_model/scripts/model_registry.py +0 -465
  118. isa_model/scripts/register_models.py +0 -370
  119. isa_model/scripts/register_models_with_embeddings.py +0 -510
  120. isa_model/scripts/start_mlflow.py +0 -95
  121. isa_model/scripts/training_tracker.py +0 -257
  122. isa_model-0.3.9.dist-info/RECORD +0 -138
  123. {isa_model-0.3.9.dist-info → isa_model-0.4.0.dist-info}/WHEEL +0 -0
  124. {isa_model-0.3.9.dist-info → isa_model-0.4.0.dist-info}/top_level.txt +0 -0
isa_model/deployment/cloud/modal/isa_video_hunyuan_service.py
@@ -0,0 +1,423 @@
+"""
+ISA HunyuanVideo Service
+
+SOTA open-source video generation service using HunyuanVideo (13B parameters)
+- Text-to-Video generation with cinematic quality
+- Superior motion accuracy and physics simulation
+- Beats Runway Gen-3 in benchmarks
+"""
+
+import modal
+import time
+import json
+import os
+import logging
+import base64
+import tempfile
+from typing import Dict, List, Optional, Any
+from pathlib import Path
+
+# Define Modal application
+app = modal.App("isa-video-hunyuan")
+
+# Define Modal container image with HunyuanVideo dependencies
+image = (
+    modal.Image.debian_slim(python_version="3.10")
+    .pip_install([
+        "torch>=2.0.0",
+        "torchvision>=0.15.0",
+        "torchaudio>=2.0.0",
+        "transformers>=4.35.0",
+        "diffusers>=0.24.0",
+        "accelerate>=0.24.0",
+        "huggingface_hub>=0.19.0",
+        "opencv-python>=4.8.0",
+        "pillow>=10.0.0",
+        "numpy>=1.24.0",
+        "imageio>=2.31.0",
+        "imageio-ffmpeg>=0.4.8",
+        "ffmpeg-python>=0.2.0",
+        "requests>=2.31.0",
+        "httpx>=0.26.0",
+        "pydantic>=2.0.0",
+        "python-dotenv>=1.0.0",
+        "safetensors>=0.4.0",
+        "xformers>=0.0.22",  # For memory efficiency
+        "einops>=0.7.0",
+    ])
+    .apt_install([
+        "ffmpeg",
+        "libsm6",
+        "libxext6",
+        "libxrender-dev",
+        "libglib2.0-0",
+        "libgl1-mesa-glx",
+        "git-lfs"
+    ])
+    .env({
+        "TRANSFORMERS_CACHE": "/models",
+        "TORCH_HOME": "/models/torch",
+        "HF_HOME": "/models",
+        "CUDA_VISIBLE_DEVICES": "0",
+        "PYTORCH_CUDA_ALLOC_CONF": "max_split_size_mb:512"
+    })
+)
+
+# HunyuanVideo Service - Optimized for A100 GPU
+@app.cls(
+    gpu="A100",  # 80GB A100 for 13B model
+    image=image,
+    memory=32768,  # 32GB RAM
+    timeout=3600,  # 60 minutes for video generation
+    scaledown_window=300,  # 5 minutes idle timeout
+    min_containers=0,  # Scale to zero
+    max_containers=5,  # Support up to 5 concurrent containers
+    secrets=[modal.Secret.from_name("huggingface-secret")],  # Optional HF token
+)
+class ISAVideoHunyuanService:
+    """
+    ISA HunyuanVideo Service
+
+    SOTA 13B parameter video generation model:
+    - Model: Tencent/HunyuanVideo
+    - Architecture: Diffusion Transformer
+    - Performance: Beats Runway Gen-3
+    - Capabilities: Text-to-Video, cinematic quality
+    """
+
+    @modal.enter()
+    def load_models(self):
+        """Load HunyuanVideo model and dependencies"""
+        print("Loading HunyuanVideo (13B parameters)...")
+        start_time = time.time()
+
+        # Initialize instance variables
+        self.pipeline = None
+        self.logger = logging.getLogger(__name__)
+        self.request_count = 0
+        self.total_processing_time = 0.0
+
+        try:
+            import torch
+            from diffusers import HunyuanVideoPipeline
+
+            print("Loading HunyuanVideo pipeline...")
+
+            # Load HunyuanVideo pipeline with optimizations
+            self.pipeline = HunyuanVideoPipeline.from_pretrained(
+                "tencent/HunyuanVideo",
+                torch_dtype=torch.bfloat16,
+                use_safetensors=True,
+                variant="bf16"
+            )
+
+            # Enable memory efficient attention
+            if hasattr(self.pipeline, 'enable_xformers_memory_efficient_attention'):
+                self.pipeline.enable_xformers_memory_efficient_attention()
+
+            # Enable CPU offloading for memory efficiency
+            self.pipeline.enable_model_cpu_offload()
+
+            # Enable VAE slicing for memory efficiency
+            if hasattr(self.pipeline.vae, 'enable_slicing'):
+                self.pipeline.vae.enable_slicing()
+
+            # Enable VAE tiling for large videos
+            if hasattr(self.pipeline.vae, 'enable_tiling'):
+                self.pipeline.vae.enable_tiling()
+
+            load_time = time.time() - start_time
+            print(f"HunyuanVideo loaded successfully in {load_time:.2f}s")
+
+            # Model loading status
+            self.models_loaded = True
+
+        except Exception as e:
+            print(f"Model loading failed: {e}")
+            import traceback
+            traceback.print_exc()
+            self.models_loaded = False
+
+    @modal.method()
+    def generate_video(
+        self,
+        prompt: str,
+        negative_prompt: Optional[str] = None,
+        num_frames: int = 49,
+        height: int = 720,
+        width: int = 1280,
+        fps: int = 15,
+        num_inference_steps: int = 30,
+        guidance_scale: float = 6.0,
+        seed: Optional[int] = None,
+        output_format: str = "mp4"
+    ) -> Dict[str, Any]:
+        """
+        Generate video using HunyuanVideo
+
+        Args:
+            prompt: Text description for video generation
+            negative_prompt: What to avoid in generation
+            num_frames: Number of frames (default 49, max 129)
+            height: Video height (default 720, max 1024)
+            width: Video width (default 1280, max 1920)
+            fps: Frames per second (default 15)
+            num_inference_steps: Denoising steps (default 30)
+            guidance_scale: How closely to follow prompt (default 6.0)
+            seed: Random seed for reproducibility
+            output_format: Output format ('mp4' or 'gif')
+
+        Returns:
+            Video generation results with metadata
+        """
+        start_time = time.time()
+        self.request_count += 1
+
+        try:
+            # Validate model loading status
+            if not self.models_loaded or not self.pipeline:
+                raise RuntimeError("HunyuanVideo model not loaded")
+
+            # Validate parameters
+            num_frames = min(max(num_frames, 9), 129)  # Clamp to valid range
+            height = min(max(height, 480), 1024)  # Clamp to valid range
+            width = min(max(width, 640), 1920)  # Clamp to valid range
+
+            # Set random seed if provided
+            if seed is not None:
+                import torch
+                torch.manual_seed(seed)
+                if torch.cuda.is_available():
+                    torch.cuda.manual_seed_all(seed)
+
+            print(f"Generating video: {prompt[:100]}...")
+            print(f"Parameters: {num_frames} frames, {width}x{height}, {fps}fps")
+
+            # Generate video using HunyuanVideo
+            video_frames = self.pipeline(
+                prompt=prompt,
+                negative_prompt=negative_prompt,
+                num_frames=num_frames,
+                height=height,
+                width=width,
+                num_inference_steps=num_inference_steps,
+                guidance_scale=guidance_scale,
+                generator=None if seed is None else torch.Generator().manual_seed(seed)
+            ).frames[0]
+
+            # Save video to temporary file
+            with tempfile.NamedTemporaryFile(suffix=f".{output_format}", delete=False) as tmp_file:
+                if output_format.lower() == "mp4":
+                    self._save_video_mp4(video_frames, tmp_file.name, fps)
+                elif output_format.lower() == "gif":
+                    self._save_video_gif(video_frames, tmp_file.name, fps)
+                else:
+                    raise ValueError(f"Unsupported output format: {output_format}")
+
+            # Read video file and encode to base64
+            with open(tmp_file.name, "rb") as f:
+                video_data = f.read()
+            video_b64 = base64.b64encode(video_data).decode('utf-8')
+
+            # Clean up temp file
+            os.unlink(tmp_file.name)
+
+            processing_time = time.time() - start_time
+            self.total_processing_time += processing_time
+
+            # Calculate cost (A100 GPU: ~$2.00/hour)
+            gpu_cost = (processing_time / 3600) * 2.00
+
+            result = {
+                'success': True,
+                'service': 'isa-video-hunyuan',
+                'operation': 'video_generation',
+                'provider': 'ISA',
+                'video_b64': video_b64,
+                'video_format': output_format,
+                'prompt': prompt,
+                'model': 'HunyuanVideo-13B',
+                'architecture': 'Diffusion Transformer',
+                'parameters': {
+                    'num_frames': num_frames,
+                    'height': height,
+                    'width': width,
+                    'fps': fps,
+                    'inference_steps': num_inference_steps,
+                    'guidance_scale': guidance_scale,
+                    'seed': seed
+                },
+                'video_info': {
+                    'duration_seconds': num_frames / fps,
+                    'total_frames': num_frames,
+                    'resolution': f"{width}x{height}",
+                    'file_size_bytes': len(video_data)
+                },
+                'processing_time': processing_time,
+                'billing': {
+                    'request_id': f"video_{self.request_count}_{int(time.time())}",
+                    'gpu_seconds': processing_time,
+                    'estimated_cost_usd': round(gpu_cost, 4),
+                    'gpu_type': 'A100'
+                },
+                'model_info': {
+                    'model_name': 'HunyuanVideo',
+                    'provider': 'ISA',
+                    'architecture': 'Diffusion Transformer',
+                    'parameters': '13B',
+                    'gpu': 'A100',
+                    'performance': 'SOTA 2024 (beats Runway Gen-3)',
+                    'container_id': os.environ.get('MODAL_TASK_ID', 'unknown')
+                }
+            }
+
+            # Output JSON results
+            print("=== JSON_RESULT_START ===")
+            print(json.dumps({k: v for k, v in result.items() if k != 'video_b64'}, default=str))
+            print("=== JSON_RESULT_END ===")
+
+            return result
+
+        except Exception as e:
+            processing_time = time.time() - start_time
+            error_result = {
+                'success': False,
+                'service': 'isa-video-hunyuan',
+                'operation': 'video_generation',
+                'provider': 'ISA',
+                'error': str(e),
+                'processing_time': processing_time,
+                'billing': {
+                    'request_id': f"video_{self.request_count}_{int(time.time())}",
+                    'gpu_seconds': processing_time,
+                    'estimated_cost_usd': round((processing_time / 3600) * 2.00, 4),
+                    'gpu_type': 'A100'
+                }
+            }
+
+            print("=== JSON_RESULT_START ===")
+            print(json.dumps(error_result, default=str))
+            print("=== JSON_RESULT_END ===")
+
+            return error_result
+
+    def _save_video_mp4(self, frames, output_path: str, fps: int):
+        """Save video frames as MP4"""
+        try:
+            import imageio
+
+            # Convert frames to numpy arrays if needed
+            if hasattr(frames[0], 'numpy'):
+                frames = [frame.numpy() for frame in frames]
+
+            # Write MP4 video
+            imageio.mimsave(
+                output_path,
+                frames,
+                fps=fps,
+                codec='libx264',
+                ffmpeg_params=['-pix_fmt', 'yuv420p']
+            )
+
+        except Exception as e:
+            print(f"Failed to save MP4: {e}")
+            raise
+
+    def _save_video_gif(self, frames, output_path: str, fps: int):
+        """Save video frames as GIF"""
+        try:
+            import imageio
+
+            # Convert frames to numpy arrays if needed
+            if hasattr(frames[0], 'numpy'):
+                frames = [frame.numpy() for frame in frames]
+
+            # Write GIF
+            imageio.mimsave(
+                output_path,
+                frames,
+                fps=fps,
+                loop=0
+            )
+
+        except Exception as e:
+            print(f"Failed to save GIF: {e}")
+            raise
+
+    @modal.method()
+    def health_check(self) -> Dict[str, Any]:
+        """Health check endpoint"""
+        return {
+            'status': 'healthy',
+            'service': 'isa-video-hunyuan',
+            'provider': 'ISA',
+            'models_loaded': self.models_loaded,
+            'model': 'HunyuanVideo-13B',
+            'architecture': 'Diffusion Transformer',
+            'timestamp': time.time(),
+            'gpu': 'A100',
+            'memory_usage': '32GB',
+            'request_count': self.request_count,
+            'performance': 'SOTA 2024 (beats Runway Gen-3)'
+        }
+
+# Deployment functions
+@app.function()
+def deploy_info():
+    """Deployment information"""
+    return {
+        'service': 'isa-video-hunyuan',
+        'version': '1.0.0',
+        'description': 'ISA HunyuanVideo service - SOTA 13B parameter video generation',
+        'model': 'HunyuanVideo-13B',
+        'architecture': 'Diffusion Transformer',
+        'gpu': 'A100',
+        'performance': 'Beats Runway Gen-3',
+        'deployment_time': time.time()
+    }
+
+@app.function()
+def register_service():
+    """Register service to model repository"""
+    try:
+        from isa_model.core.models.model_repo import ModelRepository
+
+        repo = ModelRepository()
+
+        # Register video generation service
+        repo.register_model({
+            'model_id': 'isa-hunyuan-video-service',
+            'model_type': 'video',
+            'provider': 'isa',
+            'endpoint': 'https://isa-video-hunyuan.modal.run',
+            'capabilities': ['text_to_video', 'video_generation', 'cinematic_quality'],
+            'pricing': {'gpu_type': 'A100', 'cost_per_hour': 2.00},
+            'metadata': {
+                'model': 'HunyuanVideo-13B',
+                'architecture': 'Diffusion Transformer',
+                'parameters': '13B',
+                'performance': 'SOTA 2024',
+                'max_resolution': '1920x1024',
+                'max_frames': 129,
+                'supported_formats': ['mp4', 'gif']
+            }
+        })
+
+        print("HunyuanVideo service registered successfully")
+        return {'status': 'registered'}
+
+    except Exception as e:
+        print(f"Service registration failed: {e}")
+        return {'status': 'failed', 'error': str(e)}
+
+if __name__ == "__main__":
+    print("ISA HunyuanVideo Service - Modal Deployment")
+    print("Deploy with: modal deploy isa_video_hunyuan_service.py")
+    print()
+    print("Model: HunyuanVideo (13B parameters)")
+    print("Architecture: Diffusion Transformer")
+    print("Performance: SOTA 2024 (beats Runway Gen-3)")
+    print("GPU: A100 (80GB)")
+    print()
+    print("Usage:")
+    print("service.generate_video('A cat walking in a garden', num_frames=49, width=1280, height=720)")