isa-model 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. isa_model/__init__.py +1 -1
  2. isa_model/core/model_registry.py +273 -46
  3. isa_model/deployment/gpu_fp16_ds8/models/deepseek_r1/1/model.py +120 -0
  4. isa_model/deployment/gpu_fp16_ds8/scripts/download_model.py +18 -0
  5. isa_model/deployment/gpu_int8_ds8/app/server.py +66 -0
  6. isa_model/deployment/gpu_int8_ds8/scripts/test_client.py +43 -0
  7. isa_model/deployment/gpu_int8_ds8/scripts/test_client_os.py +35 -0
  8. isa_model/eval/__init__.py +56 -0
  9. isa_model/eval/benchmarks.py +469 -0
  10. isa_model/eval/factory.py +582 -0
  11. isa_model/eval/metrics.py +628 -0
  12. isa_model/inference/ai_factory.py +98 -93
  13. isa_model/inference/providers/openai_provider.py +21 -7
  14. isa_model/inference/providers/replicate_provider.py +18 -5
  15. isa_model/inference/providers/triton_provider.py +1 -1
  16. isa_model/inference/services/audio/base_stt_service.py +91 -0
  17. isa_model/inference/services/audio/base_tts_service.py +136 -0
  18. isa_model/inference/services/audio/{yyds_audio_service.py → openai_tts_service.py} +4 -4
  19. isa_model/inference/services/embedding/ollama_embed_service.py +48 -36
  20. isa_model/inference/services/llm/__init__.py +0 -4
  21. isa_model/inference/services/llm/base_llm_service.py +134 -0
  22. isa_model/inference/services/llm/ollama_llm_service.py +1 -10
  23. isa_model/inference/services/llm/openai_llm_service.py +70 -61
  24. isa_model/inference/services/vision/__init__.py +1 -1
  25. isa_model/inference/services/vision/ollama_vision_service.py +4 -4
  26. isa_model/inference/services/vision/{yyds_vision_service.py → openai_vision_service.py} +5 -5
  27. isa_model/inference/services/vision/replicate_image_gen_service.py +185 -0
  28. isa_model/training/__init__.py +44 -0
  29. isa_model/training/factory.py +393 -0
  30. isa_model-0.1.1.dist-info/METADATA +327 -0
  31. {isa_model-0.1.0.dist-info → isa_model-0.1.1.dist-info}/RECORD +35 -60
  32. isa_model/deployment/mlflow_gateway/__init__.py +0 -8
  33. isa_model/deployment/mlflow_gateway/start_gateway.py +0 -65
  34. isa_model/deployment/unified_multimodal_client.py +0 -341
  35. isa_model/inference/adapter/triton_adapter.py +0 -453
  36. isa_model/inference/backends/Pytorch/bge_embed_backend.py +0 -188
  37. isa_model/inference/backends/Pytorch/gemma_backend.py +0 -167
  38. isa_model/inference/backends/Pytorch/llama_backend.py +0 -166
  39. isa_model/inference/backends/Pytorch/whisper_backend.py +0 -194
  40. isa_model/inference/backends/__init__.py +0 -53
  41. isa_model/inference/backends/base_backend_client.py +0 -26
  42. isa_model/inference/backends/container_services.py +0 -104
  43. isa_model/inference/backends/local_services.py +0 -72
  44. isa_model/inference/backends/openai_client.py +0 -130
  45. isa_model/inference/backends/replicate_client.py +0 -197
  46. isa_model/inference/backends/third_party_services.py +0 -239
  47. isa_model/inference/backends/triton_client.py +0 -97
  48. isa_model/inference/client_sdk/client.py +0 -134
  49. isa_model/inference/client_sdk/client_data_std.py +0 -34
  50. isa_model/inference/client_sdk/client_sdk_schema.py +0 -16
  51. isa_model/inference/client_sdk/exceptions.py +0 -0
  52. isa_model/inference/engine/triton/model_repository/bge/1/model.py +0 -174
  53. isa_model/inference/engine/triton/model_repository/gemma/1/model.py +0 -250
  54. isa_model/inference/engine/triton/model_repository/llama/1/model.py +0 -76
  55. isa_model/inference/engine/triton/model_repository/whisper/1/model.py +0 -195
  56. isa_model/inference/providers/vllm_provider.py +0 -0
  57. isa_model/inference/providers/yyds_provider.py +0 -83
  58. isa_model/inference/services/audio/fish_speech/handler.py +0 -215
  59. isa_model/inference/services/audio/runpod_tts_fish_service.py +0 -212
  60. isa_model/inference/services/audio/triton_speech_service.py +0 -138
  61. isa_model/inference/services/audio/whisper_service.py +0 -186
  62. isa_model/inference/services/base_tts_service.py +0 -66
  63. isa_model/inference/services/embedding/bge_service.py +0 -183
  64. isa_model/inference/services/embedding/ollama_rerank_service.py +0 -118
  65. isa_model/inference/services/embedding/onnx_rerank_service.py +0 -73
  66. isa_model/inference/services/llm/gemma_service.py +0 -143
  67. isa_model/inference/services/llm/llama_service.py +0 -143
  68. isa_model/inference/services/llm/replicate_llm_service.py +0 -179
  69. isa_model/inference/services/llm/triton_llm_service.py +0 -230
  70. isa_model/inference/services/vision/replicate_vision_service.py +0 -241
  71. isa_model/inference/services/vision/triton_vision_service.py +0 -199
  72. isa_model-0.1.0.dist-info/METADATA +0 -116
  73. /isa_model/inference/{client_sdk/__init__.py → services/embedding/openai_embed_service.py} +0 -0
  74. {isa_model-0.1.0.dist-info → isa_model-0.1.1.dist-info}/WHEEL +0 -0
  75. {isa_model-0.1.0.dist-info → isa_model-0.1.1.dist-info}/licenses/LICENSE +0 -0
  76. {isa_model-0.1.0.dist-info → isa_model-0.1.1.dist-info}/top_level.txt +0 -0
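Two of the changes above are renames rather than rewrites: `yyds_audio_service.py` becomes `openai_tts_service.py` (#18) and `yyds_vision_service.py` becomes `openai_vision_service.py` (#26), with the `yyds_provider.py` backend (#57) removed outright. For callers this mostly means import paths move. A minimal sketch of the migration, assuming module paths mirror the file paths listed above; the class names are hypothetical, since the diff does not show them:

```python
# 0.1.0 (removed "yyds" modules; class name hypothetical):
# from isa_model.inference.services.vision.yyds_vision_service import YydsVisionService

# 0.1.1 (renamed modules, per the file list above; class names are assumptions):
from isa_model.inference.services.vision.openai_vision_service import OpenAIVisionService
from isa_model.inference.services.audio.openai_tts_service import OpenAITTSService
```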
isa_model/inference/services/vision/replicate_vision_service.py
@@ -1,241 +0,0 @@
- #!/usr/bin/env python
- # -*- coding: utf-8 -*-
-
- """
- Replicate Vision service
- Talks to the Replicate API; supports image generation and image analysis
- """
-
- import os
- import time
- import uuid
- import logging
- from typing import Dict, Any, List, Optional, Union
- import asyncio
- import aiohttp
- import replicate
- import requests
- from PIL import Image
- from io import BytesIO
-
- from isa_model.inference.services.base_service import BaseService
- from isa_model.inference.providers.base_provider import BaseProvider
- from isa_model.inference.base import ModelType
-
- # Configure logging
- logging.basicConfig(level=logging.INFO)
- logger = logging.getLogger(__name__)
-
- class ReplicateVisionService(BaseService):
-     """
-     Replicate Vision service for image generation and analysis
-     """
-
-     def __init__(self, provider: BaseProvider, model_name: str):
-         """
-         Initialize the Replicate Vision service
-
-         Args:
-             provider: Replicate provider instance
-             model_name: Replicate model ID (format: 'username/model_name:version')
-         """
-         super().__init__(provider, model_name)
-         self.api_token = provider.config.get("api_token", os.environ.get("REPLICATE_API_TOKEN"))
-         self.client = replicate.Client(api_token=self.api_token)
-         self.model_type = ModelType.VISION
-
-         # Optional defaults
-         self.guidance_scale = provider.config.get("guidance_scale", 7.5)
-         self.num_inference_steps = provider.config.get("num_inference_steps", 30)
-
-         # Directory for generated images
-         self.output_dir = "generated_images"
-         os.makedirs(self.output_dir, exist_ok=True)
-
-     async def load(self) -> None:
-         """
-         Load the model (for Replicate this just validates the API token)
-         """
-         if not self.api_token:
-             raise ValueError("Missing Replicate API token; set the REPLICATE_API_TOKEN environment variable")
-
-         # Validate the token
-         try:
-             self.client.api_token = self.api_token
-             logger.info(f"Replicate Vision service initialized with model: {self.model_name}")
-         except Exception as e:
-             logger.error(f"Replicate initialization failed: {e}")
-             raise
-
-     async def generate_image(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
-         """
-         Generate images with a Replicate model
-
-         Args:
-             input_data: dictionary of generation parameters
-
-         Returns:
-             Result dictionary containing the generated image URLs
-         """
-         try:
-             # Apply default parameters
-             if "guidance_scale" not in input_data and self.guidance_scale:
-                 input_data["guidance_scale"] = self.guidance_scale
-
-             if "num_inference_steps" not in input_data and self.num_inference_steps:
-                 input_data["num_inference_steps"] = self.num_inference_steps
-
-             # Run the model (a synchronous API call)
-             logger.info(f"Generating image with model {self.model_name}")
-
-             # Off-load the blocking call to an executor
-             loop = asyncio.get_event_loop()
-             output = await loop.run_in_executor(
-                 None,
-                 lambda: replicate.run(self.model_name, input=input_data)
-             )
-
-             # Normalize the result
-             # Handle Replicate object outputs
-             if hasattr(output, 'url'):
-                 urls = [output.url]
-             elif isinstance(output, list) and all(hasattr(item, 'url') for item in output if item is not None):
-                 urls = [item.url for item in output if item is not None]
-             else:
-                 # Fall back to plain URL strings
-                 urls = output if isinstance(output, list) else [output]
-
-             result = {
-                 "urls": urls,
-                 "metadata": {
-                     "model": self.model_name,
-                     "input": input_data
-                 }
-             }
-
-             logger.info(f"Image generation finished: {result['urls']}")
-             return result
-
-         except Exception as e:
-             logger.error(f"Image generation failed: {e}")
-             raise
-
-     async def analyze_image(self, image_path: str, prompt: str) -> Dict[str, Any]:
-         """
-         Analyze an image (for vision-analysis models)
-
-         Args:
-             image_path: image path or URL
-             prompt: analysis prompt
-
-         Returns:
-             Analysis result dictionary
-         """
-         try:
-             # Build the input payload
-             input_data = {
-                 "image": self._get_image_url(image_path),
-                 "prompt": prompt
-             }
-
-             # Run the model
-             logger.info(f"Analyzing image with model {self.model_name}")
-
-             # Off-load the blocking call to an executor
-             loop = asyncio.get_event_loop()
-             output = await loop.run_in_executor(
-                 None,
-                 lambda: replicate.run(self.model_name, input=input_data)
-             )
-
-             result = {
-                 "text": output,
-                 "metadata": {
-                     "model": self.model_name,
-                     "input": input_data
-                 }
-             }
-
-             logger.info("Image analysis finished")
-             return result
-
-         except Exception as e:
-             logger.error(f"Image analysis failed: {e}")
-             raise
-
-     async def generate_and_save(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
-         """
-         Generate images and save them locally
-
-         Args:
-             input_data: dictionary of generation parameters
-
-         Returns:
-             Result dictionary with generated image URLs and saved paths
-         """
-         # Generate the images first
-         result = await self.generate_image(input_data)
-
-         # Then download and save them
-         saved_paths = []
-         for i, url in enumerate(result["urls"]):
-             # Build a unique file name
-             timestamp = int(time.time())
-             file_name = f"{self.output_dir}/{timestamp}_{uuid.uuid4().hex[:8]}_{i+1}.png"
-
-             # Download the image asynchronously
-             try:
-                 await self._download_image(url, file_name)
-                 saved_paths.append(file_name)
-                 logger.info(f"Image saved to: {file_name}")
-             except Exception as e:
-                 logger.error(f"Failed to save image: {e}")
-
-         # Attach saved paths to the result
-         result["saved_paths"] = saved_paths
-         return result
-
-     async def _download_image(self, url: str, save_path: str) -> None:
-         """
-         Download an image asynchronously and save it
-
-         Args:
-             url: image URL
-             save_path: destination path
-         """
-         try:
-             async with aiohttp.ClientSession() as session:
-                 async with session.get(url) as response:
-                     if response.status == 200:
-                         content = await response.read()
-                         img = Image.open(BytesIO(content))
-                         img.save(save_path)
-                     else:
-                         logger.error(f"Image download failed: HTTP {response.status}")
-                         raise Exception(f"Image download failed: HTTP {response.status}")
-         except Exception as e:
-             logger.error(f"Error while downloading image: {e}")
-             raise
-
-     def _get_image_url(self, image_path: str) -> str:
-         """
-         Resolve an image URL (a local path would need to be uploaded to temporary storage)
-
-         Args:
-             image_path: image path or URL
-
-         Returns:
-             Image URL
-         """
-         # Already a URL; return it unchanged
-         if image_path.startswith(("http://", "https://")):
-             return image_path
-
-         # Otherwise this is a local file that would need uploading
-         # Note: upload logic could go here; for simplicity only URLs are supported
-         raise NotImplementedError("Only image URLs are supported; local file upload is not implemented")
-
-     async def unload(self) -> None:
-         """Unload the model (a no-op for the Replicate API)"""
-         logger.info(f"Unloading Replicate Vision service: {self.model_name}")
-         # No resources to clean up
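The core trick in the deleted service, wrapping the blocking `replicate.run()` call so it can be awaited, is independent of the class around it (its apparent successor in 0.1.1 is `replicate_image_gen_service.py`, added above, whose API the diff does not show). A minimal self-contained sketch of that pattern, assuming a valid `REPLICATE_API_TOKEN` in the environment; the model ID and prompt below are illustrative:

```python
import asyncio
import replicate

async def run_replicate(model_id: str, inputs: dict):
    # replicate.run() blocks on the HTTP round trip, so hand it to the
    # default thread-pool executor and await the result, as the deleted
    # ReplicateVisionService did.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None,
        lambda: replicate.run(model_id, input=inputs),
    )

# Usage (model ID and prompt are illustrative):
# output = asyncio.run(run_replicate(
#     "stability-ai/sdxl:c221b2b8ef527988fb59bf24a8b97c4561f1c671f73bd389f866bfb27c061316",
#     {"prompt": "A beautiful sunset over mountains"},
# ))
```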
isa_model/inference/services/vision/triton_vision_service.py
@@ -1,199 +0,0 @@
- import json
- import logging
- import asyncio
- import base64
- import io
- from PIL import Image
- import numpy as np
- from typing import Dict, List, Any, AsyncGenerator, Optional, Union
-
- from isa_model.inference.services.base_service import BaseService
- from isa_model.inference.providers.triton_provider import TritonProvider
-
- logger = logging.getLogger(__name__)
-
-
- class TritonVisionService(BaseService):
-     """
-     Vision service that uses Triton Inference Server to run inference.
-     """
-
-     def __init__(self, provider: TritonProvider, model_name: str):
-         """
-         Initialize the Triton Vision service.
-
-         Args:
-             provider: The Triton provider
-             model_name: Name of the model in Triton (e.g., "Gemma3-4B")
-         """
-         super().__init__(provider, model_name)
-         self.client = None
-         self.token_usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
-         self.last_token_usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
-
-     async def _initialize_client(self):
-         """Initialize the Triton client"""
-         if self.client is None:
-             self.client = self.provider.create_client()
-
-             # Check if model is ready
-             if not self.provider.is_model_ready(self.model_name):
-                 logger.error(f"Model {self.model_name} is not ready on Triton server")
-                 raise RuntimeError(f"Model {self.model_name} is not ready on Triton server")
-
-             logger.info(f"Initialized Triton client for vision model: {self.model_name}")
-
-     async def process_image(self,
-                             image: Union[str, Image.Image, bytes],
-                             prompt: Optional[str] = None,
-                             params: Optional[Dict[str, Any]] = None) -> str:
-         """
-         Process an image and generate a description.
-
-         Args:
-             image: Input image (PIL Image, base64 string, or bytes)
-             prompt: Optional text prompt to guide the model
-             params: Generation parameters
-
-         Returns:
-             Generated text description
-         """
-         await self._initialize_client()
-
-         try:
-             import tritonclient.http as httpclient
-
-             # Process the image to get numpy array
-             image_array = self._prepare_image_input(image)
-
-             # Create input tensors for the image
-             image_input = httpclient.InferInput("IMAGE", image_array.shape, "UINT8")
-             image_input.set_data_from_numpy(image_array)
-             inputs = [image_input]
-
-             # Add text prompt if provided
-             if prompt:
-                 text_data = np.array([prompt], dtype=np.object_)
-                 text_input = httpclient.InferInput("TEXT", text_data.shape, "BYTES")
-                 text_input.set_data_from_numpy(text_data)
-                 inputs.append(text_input)
-
-             # Add parameters if provided
-             if params:
-                 default_params = {
-                     "max_new_tokens": 512,
-                     "temperature": 0.7,
-                     "top_p": 0.9,
-                     "do_sample": True
-                 }
-                 generation_params = {**default_params, **params}
-
-                 param_json = json.dumps(generation_params)
-                 param_data = np.array([param_json], dtype=np.object_)
-                 param_input = httpclient.InferInput("PARAMETERS", param_data.shape, "BYTES")
-                 param_input.set_data_from_numpy(param_data)
-                 inputs.append(param_input)
-
-             # Create output tensor
-             outputs = [httpclient.InferRequestedOutput("TEXT")]
-
-             # Send the request
-             response = await asyncio.to_thread(
-                 self.client.infer,
-                 self.model_name,
-                 inputs,
-                 outputs=outputs
-             )
-
-             # Process the response
-             output = response.as_numpy("TEXT")
-             response_text = output[0].decode('utf-8')
-
-             # Update token usage (estimated since we don't have actual token counts)
-             prompt_tokens = len(prompt) // 4 if prompt else 100  # Rough estimate
-             completion_tokens = len(response_text) // 4  # Rough estimate
-             total_tokens = prompt_tokens + completion_tokens
-
-             self.last_token_usage = {
-                 "prompt_tokens": prompt_tokens,
-                 "completion_tokens": completion_tokens,
-                 "total_tokens": total_tokens
-             }
-
-             # Update total token usage
-             self.token_usage["prompt_tokens"] += prompt_tokens
-             self.token_usage["completion_tokens"] += completion_tokens
-             self.token_usage["total_tokens"] += total_tokens
-
-             return response_text
-
-         except Exception as e:
-             logger.error(f"Error during Triton vision inference: {str(e)}")
-             raise
-
-     def get_token_usage(self) -> Dict[str, int]:
-         """
-         Get total token usage statistics.
-
-         Returns:
-             Dictionary with token usage statistics
-         """
-         return self.token_usage
-
-     def get_last_token_usage(self) -> Dict[str, int]:
-         """
-         Get token usage from last request.
-
-         Returns:
-             Dictionary with token usage statistics from last request
-         """
-         return self.last_token_usage
-
-     def _prepare_image_input(self, image: Union[str, Image.Image, bytes]) -> np.ndarray:
-         """
-         Process different types of image inputs into a numpy array.
-
-         Args:
-             image: Image input (PIL Image, base64 string, or bytes)
-
-         Returns:
-             Numpy array of the image
-         """
-         # Convert to PIL image first
-         pil_image = self._to_pil_image(image)
-
-         # Convert PIL image to numpy array
-         return np.array(pil_image)
-
-     def _to_pil_image(self, image: Union[str, Image.Image, bytes]) -> Image.Image:
-         """
-         Convert different image inputs to PIL Image.
-
-         Args:
-             image: Image input (PIL Image, base64 string, or bytes)
-
-         Returns:
-             PIL Image
-         """
-         if isinstance(image, Image.Image):
-             return image
-
-         elif isinstance(image, str):
-             # Check if it's a base64 string
-             if image.startswith("data:image"):
-                 # Extract the base64 part
-                 image = image.split(",")[1]
-
-             try:
-                 # Try to decode as base64
-                 image_bytes = base64.b64decode(image)
-                 return Image.open(io.BytesIO(image_bytes))
-             except Exception:
-                 # Try to open as a file path
-                 return Image.open(image)
-
-         elif isinstance(image, bytes):
-             return Image.open(io.BytesIO(image))
-
-         else:
-             raise ValueError(f"Unsupported image type: {type(image)}")
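For reference, the request-building pattern the deleted service used with `tritonclient` is reproduced below as a minimal standalone sketch. The tensor names (`IMAGE`, `TEXT`) are carried over from the deleted code; the server URL, function name, and the assumption that the model returns a single BYTES output are illustrative:

```python
import numpy as np
import tritonclient.http as httpclient
from PIL import Image

def infer_vision(url: str, model_name: str, image_path: str, prompt: str) -> str:
    client = httpclient.InferenceServerClient(url=url)  # e.g. "localhost:8000"

    # The image goes in as a UINT8 tensor and the prompt as a BYTES tensor,
    # mirroring the "IMAGE"/"TEXT" tensor names from the deleted service.
    image_array = np.array(Image.open(image_path))
    image_input = httpclient.InferInput("IMAGE", list(image_array.shape), "UINT8")
    image_input.set_data_from_numpy(image_array)

    text_data = np.array([prompt], dtype=np.object_)
    text_input = httpclient.InferInput("TEXT", list(text_data.shape), "BYTES")
    text_input.set_data_from_numpy(text_data)

    # Request the "TEXT" output tensor and decode the first element.
    outputs = [httpclient.InferRequestedOutput("TEXT")]
    response = client.infer(model_name, [image_input, text_input], outputs=outputs)
    return response.as_numpy("TEXT")[0].decode("utf-8")
```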
isa_model-0.1.0.dist-info/METADATA
@@ -1,116 +0,0 @@
- Metadata-Version: 2.4
- Name: isa-model
- Version: 0.1.0
- Summary: Unified AI model serving framework
- Author-email: isA_Model Contributors <your.email@example.com>
- License: MIT
- Classifier: Development Status :: 3 - Alpha
- Classifier: Intended Audience :: Developers
- Classifier: Operating System :: OS Independent
- Classifier: Programming Language :: Python :: 3
- Classifier: License :: OSI Approved :: MIT License
- Requires-Python: >=3.8
- Description-Content-Type: text/markdown
- License-File: LICENSE
- Requires-Dist: fastapi>=0.95.0
- Requires-Dist: numpy>=1.20.0
- Requires-Dist: httpx>=0.23.0
- Requires-Dist: pydantic>=2.0.0
- Requires-Dist: uvicorn>=0.22.0
- Requires-Dist: requests>=2.28.0
- Requires-Dist: aiohttp>=3.8.0
- Requires-Dist: transformers>=4.30.0
- Requires-Dist: langchain-core>=0.1.0
- Requires-Dist: tritonclient[grpc,http]>=2.30.0
- Requires-Dist: huggingface-hub>=0.16.0
- Requires-Dist: kubernetes>=25.3.0
- Requires-Dist: mlflow>=2.4.0
- Requires-Dist: torch>=2.0.0
- Dynamic: license-file
-
- # isA_Model - AI Service Factory
-
- isA_Model is a lightweight AI service factory that provides a unified way to manage and call different AI models and service providers.
-
- ## Features
-
- - Multiple AI providers supported (Ollama, OpenAI, Replicate, Triton)
- - Unified API interface
- - Flexible factory pattern
- - Async support
- - Singleton pattern with efficient caching
-
- ## Installation
-
- ```bash
- pip install -r requirements.txt
- ```
-
- ## Quick Start
-
- Using the AI factory is simple:
-
- ```python
- from isa_model.inference.ai_factory import AIFactory
- from isa_model.inference.base import ModelType
-
- # Get the factory instance
- factory = AIFactory()
-
- # LLM example - using Ollama
- llm = factory.get_llm(model_name="llama3.1", provider="ollama")
- response = await llm.generate("Hello, please introduce yourself.")
- print(response)
-
- # Image generation example - using Replicate
- vision_service = factory.get_vision_model(
-     model_name="stability-ai/sdxl:c221b2b8ef527988fb59bf24a8b97c4561f1c671f73bd389f866bfb27c061316",
-     provider="replicate",
-     config={"api_token": "your_replicate_token"}
- )
- result = await vision_service.generate_image({
-     "prompt": "A beautiful sunset over mountains",
-     "num_inference_steps": 25
- })
- print(result["urls"])
- ```
-
- ## Factory Architecture
-
- isA_Model uses a three-layer architecture:
-
- 1. **Client layer** - application code
- 2. **Service layer** - model service implementations (LLM, vision, embedding, etc.)
- 3. **Provider layer** - underlying API integrations (Ollama, OpenAI, Replicate, etc.)
-
- ### Main Components
-
- - `AIFactory` - central factory class providing access to models and services
- - `BaseService` - base class for all services
- - `BaseProvider` - base class for all providers
- - Concrete service implementations - e.g. `ReplicateVisionService`, `OllamaLLMService`
-
- ## Supported Model Types
-
- - **LLM** - large language models
- - **VISION** - image generation and analysis
- - **EMBEDDING** - text embeddings
- - **AUDIO** - speech recognition
- - **RERANK** - reranking
-
- ## Examples
-
- See the `test_*.py` files for more usage examples.
-
- ## Environment Variables
-
- Add API keys and other configuration to the `.env.local` file:
-
- ```
- OPENAI_API_KEY=your_openai_key
- REPLICATE_API_TOKEN=your_replicate_token
- ```
-
- ## License
-
- MIT