isa-model 0.3.0-py3-none-any.whl → 0.3.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. isa_model/core/model_manager.py +69 -4
  2. isa_model/inference/ai_factory.py +335 -46
  3. isa_model/inference/billing_tracker.py +406 -0
  4. isa_model/inference/providers/base_provider.py +51 -4
  5. isa_model/inference/providers/ollama_provider.py +37 -18
  6. isa_model/inference/providers/openai_provider.py +65 -36
  7. isa_model/inference/providers/replicate_provider.py +42 -30
  8. isa_model/inference/services/audio/base_stt_service.py +21 -2
  9. isa_model/inference/services/audio/openai_realtime_service.py +353 -0
  10. isa_model/inference/services/audio/openai_stt_service.py +252 -0
  11. isa_model/inference/services/audio/openai_tts_service.py +48 -9
  12. isa_model/inference/services/audio/replicate_tts_service.py +239 -0
  13. isa_model/inference/services/base_service.py +36 -1
  14. isa_model/inference/services/embedding/openai_embed_service.py +223 -0
  15. isa_model/inference/services/llm/base_llm_service.py +88 -192
  16. isa_model/inference/services/llm/llm_adapter.py +459 -0
  17. isa_model/inference/services/llm/ollama_llm_service.py +111 -185
  18. isa_model/inference/services/llm/openai_llm_service.py +115 -360
  19. isa_model/inference/services/vision/helpers/image_utils.py +4 -3
  20. isa_model/inference/services/vision/ollama_vision_service.py +11 -3
  21. isa_model/inference/services/vision/openai_vision_service.py +275 -41
  22. isa_model/inference/services/vision/replicate_image_gen_service.py +233 -205
  23. {isa_model-0.3.0.dist-info → isa_model-0.3.2.dist-info}/METADATA +1 -1
  24. {isa_model-0.3.0.dist-info → isa_model-0.3.2.dist-info}/RECORD +26 -21
  25. {isa_model-0.3.0.dist-info → isa_model-0.3.2.dist-info}/WHEEL +0 -0
  26. {isa_model-0.3.0.dist-info → isa_model-0.3.2.dist-info}/top_level.txt +0 -0
isa_model/inference/services/vision/replicate_image_gen_service.py
@@ -2,25 +2,23 @@
 # -*- coding: utf-8 -*-

 """
-Replicate Vision service
-Interacts with the Replicate API; supports image generation and image analysis
+Replicate image generation service
+Supports the flux-schnell (text-to-image) and flux-kontext-pro (image-to-image) models
 """

 import os
 import time
 import uuid
 import logging
-from typing import Dict, Any, List, Optional, Union, Tuple
+from typing import Dict, Any, List, Optional, Union
 import asyncio
 import aiohttp
-import replicate  # import the replicate library
+import replicate
 from PIL import Image
 from io import BytesIO

-# Adjust the import path to use the correct base class
 from isa_model.inference.services.vision.base_image_gen_service import BaseImageGenService
 from isa_model.inference.providers.base_provider import BaseProvider
-from isa_model.inference.base import ModelType

 # Configure logging
 logging.basicConfig(level=logging.INFO)
@@ -28,188 +26,181 @@ logger = logging.getLogger(__name__)

 class ReplicateImageGenService(BaseImageGenService):
     """
-    Replicate Vision service for image generation and analysis.
-    Adjusted to use native async calls with optimized file handling.
+    Replicate image generation service
+    - flux-schnell: text-to-image (t2i) - $3 per 1000 images
+    - flux-kontext-pro: image-to-image (i2i) - $0.04 per image
     """

     def __init__(self, provider: BaseProvider, model_name: str):
-        """
-        Initialize the Replicate Vision service
-        """
         super().__init__(provider, model_name)
-        # Get the API token from the provider config or environment variable
-        self.api_token = self.provider.config.get("api_token", os.environ.get("REPLICATE_API_TOKEN"))
-        self.model_type = ModelType.VISION

-        # Optional default configuration
-        self.guidance_scale = self.provider.config.get("guidance_scale", 7.5)
-        self.num_inference_steps = self.provider.config.get("num_inference_steps", 30)
+        # Get configuration
+        provider_config = provider.get_full_config()
+        self.api_token = provider_config.get("api_token") or provider_config.get("replicate_api_token")

-        # Directory for generated images
+        if not self.api_token:
+            raise ValueError("Replicate API token not found in provider configuration")
+
+        # Set the API token
+        os.environ["REPLICATE_API_TOKEN"] = self.api_token
+
+        # Directory for generated images
         self.output_dir = "generated_images"
         os.makedirs(self.output_dir, exist_ok=True)

-        # ★ Adjustment: set the API token for the replicate library
-        if self.api_token:
-            # the replicate library reads it from the environment automatically; make sure it is set
-            os.environ["REPLICATE_API_TOKEN"] = self.api_token
+        # Statistics
+        self.last_generation_count = 0
+        self.total_generation_count = 0
+
+        logger.info(f"Initialized ReplicateImageGenService with model '{self.model_name}'")
+
+    async def generate_image(
+        self,
+        prompt: str,
+        negative_prompt: Optional[str] = None,
+        width: int = 512,
+        height: int = 512,
+        num_inference_steps: int = 4,
+        guidance_scale: float = 7.5,
+        seed: Optional[int] = None
+    ) -> Dict[str, Any]:
+        """Generate a single image (text-to-image)."""
+
+        if "flux-schnell" in self.model_name:
+            # FLUX Schnell parameters
+            input_data = {
+                "prompt": prompt,
+                "go_fast": True,
+                "megapixels": "1",
+                "num_outputs": 1,
+                "aspect_ratio": "1:1",
+                "output_format": "jpg",
+                "output_quality": 90,
+                "num_inference_steps": 4
+            }
         else:
-            logger.warning("Replicate API token not found. The service may not work properly.")
+            # Default parameters
+            input_data = {
+                "prompt": prompt,
+                "width": width,
+                "height": height,
+                "num_inference_steps": num_inference_steps,
+                "guidance_scale": guidance_scale
+            }
+
+        if negative_prompt:
+            input_data["negative_prompt"] = negative_prompt
+        if seed:
+            input_data["seed"] = seed
+
+        return await self._generate_internal(input_data)

-    async def _prepare_input_files(self, input_data: Dict[str, Any]) -> Tuple[Dict[str, Any], List[Any]]:
-        """
-        New helper: prepares the input data by converting local file paths into file objects.
-        This lets the service handle local files and URLs uniformly.
-        """
-        prepared_input = input_data.copy()
-        files_to_close = []
-        for key, value in prepared_input.items():
-            # If the value is a string that looks like an existing local file path
-            if isinstance(value, str) and not value.startswith(('http://', 'https://')) and os.path.exists(value):
-                logger.info(f"Detected local file path '{value}', opening the file.")
-                try:
-                    file_handle = open(value, "rb")
-                    prepared_input[key] = file_handle
-                    files_to_close.append(file_handle)
-                except Exception as e:
-                    logger.error(f"Failed to open file '{value}': {e}")
-                    raise
-        return prepared_input, files_to_close
+    async def image_to_image(
+        self,
+        prompt: str,
+        init_image: Union[str, Any],
+        strength: float = 0.8,
+        negative_prompt: Optional[str] = None,
+        num_inference_steps: int = 20,
+        guidance_scale: float = 7.5,
+        seed: Optional[int] = None
+    ) -> Dict[str, Any]:
+        """Image-to-image generation."""
+
+        if "flux-kontext-pro" in self.model_name:
+            # FLUX Kontext Pro parameters
+            input_data = {
+                "prompt": prompt,
+                "input_image": init_image,
+                "aspect_ratio": "match_input_image",
+                "output_format": "jpg",
+                "safety_tolerance": 2
+            }
+        else:
+            # Default parameters
+            input_data = {
+                "prompt": prompt,
+                "image": init_image,
+                "strength": strength,
+                "num_inference_steps": num_inference_steps,
+                "guidance_scale": guidance_scale
+            }
+
+        if negative_prompt:
+            input_data["negative_prompt"] = negative_prompt
+        if seed:
+            input_data["seed"] = seed
+
+        return await self._generate_internal(input_data)

-    async def _generate_image_internal(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
-        """
-        Internal method: generate images with the Replicate model (optimized to native async)
-        """
-        prepared_input, files_to_close = await self._prepare_input_files(input_data)
+    async def _generate_internal(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
+        """Internal generation method."""
         try:
-            # Set default parameters
-            if "guidance_scale" not in prepared_input:
-                prepared_input["guidance_scale"] = self.guidance_scale
-            if "num_inference_steps" not in prepared_input:
-                prepared_input["num_inference_steps"] = self.num_inference_steps
+            logger.info(f"Starting image generation with model {self.model_name}")

-            logger.info(f"Starting image generation with model {self.model_name} (native async)")
+            # Call the Replicate API
+            output = await replicate.async_run(self.model_name, input=input_data)

-            # ★ Adjustment: use the native async replicate.async_run
-            output = await replicate.async_run(self.model_name, input=prepared_input)
-
-            # Convert the result to a standard format (this logic is unchanged)
+            # Process the output
             if isinstance(output, list):
                 urls = output
             else:
                 urls = [output]

-            result = {
-                "urls": urls,
-                "metadata": {
+            # Update statistics
+            self.last_generation_count = len(urls)
+            self.total_generation_count += len(urls)
+
+            # Calculate cost
+            cost = self._calculate_cost(len(urls))
+
+            # Track billing information
+            from isa_model.inference.billing_tracker import ServiceType
+            self._track_usage(
+                service_type=ServiceType.IMAGE_GENERATION,
+                operation="image_generation",
+                input_units=len(urls),  # number of images generated
+                metadata={
                     "model": self.model_name,
-                    "input": input_data  # return the original input for reference
+                    "prompt": input_data.get("prompt", "")[:100],  # first 100 characters only
+                    "generation_type": "t2i" if "flux-schnell" in self.model_name else "i2i"
                 }
-            }
-            logger.info(f"Image generation complete: {result['urls']}")
-            return result
-        except Exception as e:
-            logger.error(f"Image generation failed: {e}")
-            raise
-        finally:
-            # ★ New: make sure all opened files are closed
-            for f in files_to_close:
-                f.close()
-
-    async def analyze_image(self, image_path: str, prompt: str) -> Dict[str, Any]:
-        """
-        Analyze an image (optimized to native async)
-        """
-        input_data = {"image": image_path, "prompt": prompt}
-        prepared_input, files_to_close = await self._prepare_input_files(input_data)
-        try:
-            logger.info(f"Starting image analysis with model {self.model_name} (native async)")
-            # ★ Adjustment: use the native async replicate.async_run
-            output = await replicate.async_run(self.model_name, input=prepared_input)
+            )

             result = {
-                "text": "".join(output) if isinstance(output, list) else output,
+                "urls": urls,
+                "count": len(urls),
+                "cost_usd": cost,
+                "model": self.model_name,
                 "metadata": {
-                    "model": self.model_name,
-                    "input": input_data
+                    "input": input_data,
+                    "generation_count": len(urls)
                 }
             }
-            logger.info(f"Image analysis complete")
+
+            logger.info(f"Image generation complete: {len(urls)} images, cost: ${cost:.6f}")
             return result
+
         except Exception as e:
-            logger.error(f"Image analysis failed: {e}")
-            raise
-        finally:
-            # ★ New: make sure all opened files are closed
-            for f in files_to_close:
-                f.close()
-
-    async def generate_and_save(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
-        """Generate images and save them locally (this method needs no changes)"""
-        result = await self._generate_image_internal(input_data)
-        saved_paths = []
-        for i, url in enumerate(result["urls"]):
-            timestamp = int(time.time())
-            file_name = f"{self.output_dir}/{timestamp}_{uuid.uuid4().hex[:8]}_{i+1}.png"
-            try:
-                # Convert FileOutput object to string if necessary
-                url_str = str(url) if hasattr(url, "__str__") else url
-                await self._download_image(url_str, file_name)
-                saved_paths.append(file_name)
-                logger.info(f"Image saved to: {file_name}")
-            except Exception as e:
-                logger.error(f"Failed to save image: {e}")
-        result["saved_paths"] = saved_paths
-        return result
-
-    async def _download_image(self, url: str, save_path: str) -> None:
-        """Asynchronously download and save an image (this method needs no changes)"""
-        try:
-            async with aiohttp.ClientSession() as session:
-                async with session.get(url) as response:
-                    response.raise_for_status()
-                    content = await response.read()
-                    with Image.open(BytesIO(content)) as img:
-                        img.save(save_path)
-        except Exception as e:
-            logger.error(f"Error downloading image: {url}, {e}")
+            logger.error(f"Image generation failed: {e}")
             raise

-    # `load` / `unload` are usually lightweight in the Replicate API scenario
-    async def load(self) -> None:
-        if not self.api_token:
-            raise ValueError("Missing Replicate API token; set the REPLICATE_API_TOKEN environment variable or provide it in the provider config")
-        logger.info(f"Replicate Vision service is ready, using model: {self.model_name}")
-
-    async def unload(self) -> None:
-        logger.info(f"Unloading Replicate Vision service: {self.model_name}")
-
-    # Implement the BaseImageGenService abstract methods
-    async def generate_image(
-        self,
-        prompt: str,
-        negative_prompt: Optional[str] = None,
-        width: int = 512,
-        height: int = 512,
-        num_inference_steps: int = 20,
-        guidance_scale: float = 7.5,
-        seed: Optional[int] = None
-    ) -> Dict[str, Any]:
-        """Generate a single image from text prompt"""
-        input_data = {
-            "prompt": prompt,
-            "width": width,
-            "height": height,
-            "num_inference_steps": num_inference_steps,
-            "guidance_scale": guidance_scale
-        }
+    def _calculate_cost(self, image_count: int) -> float:
+        """Calculate generation cost."""
+        from isa_model.core.model_manager import ModelManager

-        if negative_prompt:
-            input_data["negative_prompt"] = negative_prompt
-        if seed:
-            input_data["seed"] = seed
-
-        return await self._generate_image_internal(input_data)
+        manager = ModelManager()
+
+        if "flux-schnell" in self.model_name:
+            # $3 per 1000 images
+            return (image_count / 1000) * 3.0
+        elif "flux-kontext-pro" in self.model_name:
+            # $0.04 per image
+            return image_count * 0.04
+        else:
+            # Fall back to ModelManager pricing
+            pricing = manager.get_model_pricing("replicate", self.model_name)
+            return (image_count / 1000) * pricing.get("input", 0.0)

     async def generate_images(
         self,
@@ -218,11 +209,11 @@ class ReplicateImageGenService(BaseImageGenService):
         negative_prompt: Optional[str] = None,
         width: int = 512,
         height: int = 512,
-        num_inference_steps: int = 20,
+        num_inference_steps: int = 4,
         guidance_scale: float = 7.5,
         seed: Optional[int] = None
     ) -> List[Dict[str, Any]]:
-        """Generate multiple images from text prompt"""
+        """Generate multiple images."""
         results = []
         for i in range(num_images):
             current_seed = seed + i if seed else None
@@ -240,17 +231,17 @@ class ReplicateImageGenService(BaseImageGenService):
         negative_prompt: Optional[str] = None,
         width: int = 512,
         height: int = 512,
-        num_inference_steps: int = 20,
+        num_inference_steps: int = 4,
         guidance_scale: float = 7.5,
         seed: Optional[int] = None
     ) -> Dict[str, Any]:
-        """Generate image and save directly to file"""
+        """Generate an image and save it to a file."""
         result = await self.generate_image(
             prompt, negative_prompt, width, height,
             num_inference_steps, guidance_scale, seed
         )

-        # Save the first generated image to the specified path
+        # Save the first image
         if result.get("urls"):
             url = result["urls"][0]
             url_str = str(url) if hasattr(url, "__str__") else url
@@ -258,60 +249,97 @@

             return {
                 "file_path": output_path,
-                "width": width,
-                "height": height,
-                "seed": seed
+                "cost_usd": result.get("cost_usd", 0.0),
+                "model": self.model_name
             }
         else:
             raise ValueError("No image generated")

-    async def image_to_image(
-        self,
-        prompt: str,
-        init_image: Union[str, Any],
-        strength: float = 0.8,
-        negative_prompt: Optional[str] = None,
-        num_inference_steps: int = 20,
-        guidance_scale: float = 7.5,
-        seed: Optional[int] = None
-    ) -> Dict[str, Any]:
-        """Generate image based on existing image and prompt"""
-        input_data = {
-            "prompt": prompt,
-            "image": init_image,
-            "strength": strength,
-            "num_inference_steps": num_inference_steps,
-            "guidance_scale": guidance_scale
-        }
+    async def _download_image(self, url: str, save_path: str) -> None:
+        """Download an image and save it."""
+        try:
+            async with aiohttp.ClientSession() as session:
+                async with session.get(url) as response:
+                    response.raise_for_status()
+                    content = await response.read()
+                    with Image.open(BytesIO(content)) as img:
+                        img.save(save_path)
+        except Exception as e:
+            logger.error(f"Error downloading image: {url}, {e}")
+            raise
+
+    def get_generation_stats(self) -> Dict[str, Any]:
+        """Get generation statistics."""
+        total_cost = 0.0
+        if "flux-schnell" in self.model_name:
+            total_cost = (self.total_generation_count / 1000) * 3.0
+        elif "flux-kontext-pro" in self.model_name:
+            total_cost = self.total_generation_count * 0.04

-        if negative_prompt:
-            input_data["negative_prompt"] = negative_prompt
-        if seed:
-            input_data["seed"] = seed
-
-        return await self._generate_image_internal(input_data)
+        return {
+            "last_generation_count": self.last_generation_count,
+            "total_generation_count": self.total_generation_count,
+            "total_cost_usd": total_cost,
+            "model": self.model_name
+        }

     def get_supported_sizes(self) -> List[Dict[str, int]]:
-        """Get list of supported image dimensions"""
-        return [
-            {"width": 512, "height": 512},
-            {"width": 768, "height": 768},
-            {"width": 1024, "height": 1024},
-            {"width": 768, "height": 1344},
-            {"width": 1344, "height": 768},
-        ]
+        """Get supported image sizes."""
+        if "flux" in self.model_name:
+            return [
+                {"width": 512, "height": 512},
+                {"width": 768, "height": 768},
+                {"width": 1024, "height": 1024},
+            ]
+        else:
+            return [
+                {"width": 512, "height": 512},
+                {"width": 768, "height": 768},
+                {"width": 1024, "height": 1024},
+                {"width": 768, "height": 1344},
+                {"width": 1344, "height": 768},
+            ]

     def get_model_info(self) -> Dict[str, Any]:
-        """Get information about the image generation model"""
-        return {
-            "name": self.model_name,
-            "max_width": 1344,
-            "max_height": 1344,
-            "supports_negative_prompt": True,
-            "supports_img2img": True
-        }
+        """Get model information."""
+        if "flux-schnell" in self.model_name:
+            return {
+                "name": self.model_name,
+                "type": "t2i",
+                "cost_per_1000_images": 3.0,
+                "supports_negative_prompt": False,
+                "supports_img2img": False,
+                "max_steps": 4
+            }
+        elif "flux-kontext-pro" in self.model_name:
+            return {
+                "name": self.model_name,
+                "type": "i2i",
+                "cost_per_image": 0.04,
+                "supports_negative_prompt": False,
+                "supports_img2img": True,
+                "max_width": 1024,
+                "max_height": 1024
+            }
+        else:
+            return {
+                "name": self.model_name,
+                "type": "general",
+                "supports_negative_prompt": True,
+                "supports_img2img": True
+            }
+
+    async def load(self) -> None:
+        """Load the service."""
+        if not self.api_token:
+            raise ValueError("Missing Replicate API token")
+        logger.info(f"Replicate image generation service ready, using model: {self.model_name}")
+
+    async def unload(self) -> None:
+        """Unload the service."""
+        logger.info(f"Unloading Replicate image generation service: {self.model_name}")

     async def close(self):
-        """Cleanup resources"""
+        """Close the service."""
         await self.unload()

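For orientation, here is a minimal usage sketch of the rewritten service, inferred only from the method signatures in the diff above. How the service instances are constructed (for example through the package's AIFactory or a configured ReplicateProvider) is not shown in this diff, so treat the wiring below as an assumption; model names, prompts, and URLs are illustrative.

# Usage sketch only, not part of the packaged code.
# Assumes two ReplicateImageGenService instances whose providers already carry a
# Replicate API token: one bound to a "flux-schnell" model, one to "flux-kontext-pro".
import asyncio

async def demo(schnell_service, kontext_service):
    # Text-to-image: the flux-schnell branch hard-codes 4 steps, 1:1 aspect ratio, jpg output.
    t2i = await schnell_service.generate_image(prompt="a lighthouse at dusk")
    # One image at $3 per 1000 images -> cost_usd == 0.003
    print(t2i["urls"], t2i["count"], t2i["cost_usd"])

    # Image-to-image: the flux-kontext-pro branch charges $0.04 per image.
    i2i = await kontext_service.image_to_image(
        prompt="make it snowy",
        init_image="https://example.com/source.jpg",  # passed through as "input_image"
    )
    print(i2i["cost_usd"], i2i["metadata"]["generation_count"])

    # Per-instance counters and running cost added in 0.3.2.
    print(schnell_service.get_generation_stats())

# asyncio.run(demo(schnell_service, kontext_service))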
{isa_model-0.3.0.dist-info → isa_model-0.3.2.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: isa_model
-Version: 0.3.0
+Version: 0.3.2
 Summary: Unified AI model serving framework
 Author: isA_Model Contributors
 Classifier: Development Status :: 3 - Alpha
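The only metadata change is the version bump from 0.3.0 to 0.3.2. A quick way to confirm that an upgraded environment actually picked up this release (a sketch; assumes the new wheel was installed, e.g. with pip install --upgrade isa-model==0.3.2):

# Sketch: verify the installed distribution version and that a module new in this
# release (billing_tracker, per the RECORD diff below) is importable.
import importlib
from importlib.metadata import version

print(version("isa_model"))  # expected: 0.3.2
importlib.import_module("isa_model.inference.billing_tracker")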
{isa_model-0.3.0.dist-info → isa_model-0.3.2.dist-info}/RECORD
@@ -1,5 +1,5 @@
 isa_model/__init__.py,sha256=skxx7AA-1BzIT_UaDHcNmIo4rEhgL8MqOk8vPpZPrAo,87
-isa_model/core/model_manager.py,sha256=eQp0MV0x5sghL1qliPUWkFX4sEKqInyGLoICfNkJnZM,5275
+isa_model/core/model_manager.py,sha256=bPXlx170FG_fcPZBWByCVln4flQV7AU9cEZyj1E_J88,8443
 isa_model/core/model_registry.py,sha256=gT8yFxi1gC-45Bolc9WX19ZvrjuV1xyBgQX6TFhz62k,14032
 isa_model/core/model_storage.py,sha256=yMLapW87EY1EPXw6S7H8UQAZh3hJ1KxsEohjgjw-HrA,4507
 isa_model/core/storage/hf_storage.py,sha256=HTj1-YGJM3Q-9_Adw7u4NjEmSdr0njsFEL45KXzfcFw,14701
@@ -18,29 +18,34 @@ isa_model/eval/benchmarks.py,sha256=_L4Vwj2hwf2yhqoleIASO9z5e3LRCClCVEVCQbGt0I8,
 isa_model/eval/factory.py,sha256=uQXD1cZGPaMss2YGwtr8xONK9i_K7kHZG7-uwvNgEpk,29416
 isa_model/eval/metrics.py,sha256=mYeGwSa9PkgY0p-vadAscvak-pLrVfCSrsmAodVpgNQ,22584
 isa_model/inference/__init__.py,sha256=usfuQJ4zYY2RRtHkE-V6LuJ5aN7WJogtPUj9Qmy4Wvw,318
-isa_model/inference/ai_factory.py,sha256=ccU-OlnOz3X7ohjg9fa6tzS0CDR0LDKUR8oA_qctwF4,10858
+isa_model/inference/ai_factory.py,sha256=FvclMvrTfgXUm-xauv32mO2FHbzmUY0ryNknQcuSp7Q,24413
 isa_model/inference/base.py,sha256=qwOddnSGI0GUdD6qIdGBPQpkW7UjU3Y-zaZvu70B4WA,1278
+isa_model/inference/billing_tracker.py,sha256=uimayifP3oBZfU03qgveArJGl-1u6Vw2VTPj40O27t8,14888
 isa_model/inference/adapter/unified_api.py,sha256=67_Ok8W20m6Otf6r9WyOEVpnxondP4UAxOASk9ozDk4,8668
 isa_model/inference/providers/__init__.py,sha256=a83q-LMFv8u47wf0XtxvqOw_mlVgA_90wtuwy02qdDE,581
-isa_model/inference/providers/base_provider.py,sha256=btkSXE7o1IfOpv22hMM6_DNlm05tbLMszsP1J4T26KE,924
+isa_model/inference/providers/base_provider.py,sha256=PT-YnGwBu-Jn_4T3iAphkAJw_mYmKVLjUID62vf2_Ow,2711
 isa_model/inference/providers/ml_provider.py,sha256=4oGGF7lVWQ91Qh3h7olyPFoACLxCROaMxUZlDiZrRL4,1661
 isa_model/inference/providers/model_cache_manager.py,sha256=dLRpx7OJweQ5LcSAkU7D0DQRfLtIhG6nGvg4W_gau80,15315
-isa_model/inference/providers/ollama_provider.py,sha256=BLkWp4gmCw6Fwf1yNRY90VftMqwca9YOGOHf6DqVEKs,2692
-isa_model/inference/providers/openai_provider.py,sha256=8ywUsrvlvC7VY3LNOVJP1IcRwBMi1NvG0PoI0lYo4jM,3881
-isa_model/inference/providers/replicate_provider.py,sha256=qXnK3Yzy5-gaduVJVY8asrIIi-97m4WGUkG963_4ifk,3948
+isa_model/inference/providers/ollama_provider.py,sha256=IfM9XhdzfE1faguzS2-4GfhK30v5kDPecD3l4z2eB1w,3620
+isa_model/inference/providers/openai_provider.py,sha256=tB8FMsMivlRx0cfPJ0Yrxh1DCvuXyyjNFXrO4lMkkhA,5366
+isa_model/inference/providers/replicate_provider.py,sha256=0oi_BglIE6-HYgzLau9ifP8OdpAMO-QkwYk0OXRUzPk,4490
 isa_model/inference/providers/triton_provider.py,sha256=GKlth7cTOx6ERbsXXJ0gDNby3kVGQNULBDt098BXBSU,15258
 isa_model/inference/services/__init__.py,sha256=p-UlEGMnadGUD6zzwfAjf367S2QQ-z1sD6TP-K4EjEM,353
-isa_model/inference/services/base_service.py,sha256=PB6eZp-PynUdo9a0QofvHgrrJLUFYM_FSafTg7fvWrY,3083
-isa_model/inference/services/audio/base_stt_service.py,sha256=tIfdRLEppcFEyTEmI8zi8OwMd7wVP423MQDN4iYDEcE,2800
+isa_model/inference/services/base_service.py,sha256=Fqe3Q9m2hC5xXNbVGa095WUvtF9cq5ayY8n02mKtCjg,4569
+isa_model/inference/services/audio/base_stt_service.py,sha256=OP2kFU5ZZT8yMpcbD3dpuCVzYOryY9XjQqAdalaFeYc,3347
 isa_model/inference/services/audio/base_tts_service.py,sha256=BzZ3JrrLpm4COthNyNrIO2QgP7RZkXDNPEELEKHzIbA,4164
-isa_model/inference/services/audio/openai_tts_service.py,sha256=0YvSfG4q3IEuJveXVdsGq6jbkJ9AbcLf1k4RDnKB5ks,6222
+isa_model/inference/services/audio/openai_realtime_service.py,sha256=UENsx1bEb7aJoXNuBtFGIbTmETpNTZcCHlv0RydEp_U,13340
+isa_model/inference/services/audio/openai_stt_service.py,sha256=S1BEhEctj5Aw86edn1Nd38tqB-cWq2RdiGxWCzhH2a8,10225
+isa_model/inference/services/audio/openai_tts_service.py,sha256=KtzcvS_szB_Xo26Lu3T9-Wc641aiyppCfyX4xUsYhSw,8132
+isa_model/inference/services/audio/replicate_tts_service.py,sha256=bW074ohLM4QQp0eaDq395_2hggUVMVAHoVVbSf75bvI,10000
 isa_model/inference/services/embedding/base_embed_service.py,sha256=Nr6snNtOM0_ZqFfJdV7ThTb2nYVHYddGoOJXkGuyBIg,3259
 isa_model/inference/services/embedding/ollama_embed_service.py,sha256=s6LPSh-D06kFYXQjoKJp8jnatW5cx_unGbVFaq7tm5c,4745
-isa_model/inference/services/embedding/openai_embed_service.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+isa_model/inference/services/embedding/openai_embed_service.py,sha256=cjAzmYXlY0XLgwemdlhhUVlDecgzFy2xn3DSycoYvdo,8474
 isa_model/inference/services/llm/__init__.py,sha256=C6t9w33j3Ap4oGcJal9-htifKe0rxwws_kC3F-_B_Ps,341
-isa_model/inference/services/llm/base_llm_service.py,sha256=hf4egO9_s3rOQYwyhDS6O_8ECIAltkj4Ir89PTosraE,8381
-isa_model/inference/services/llm/ollama_llm_service.py,sha256=EfLdoovyrChYBlGreQukpSZt5l6DkfXwjjmPPovmm70,12934
-isa_model/inference/services/llm/openai_llm_service.py,sha256=XarEWzPg3DnITxrhkVtdR1RC0puklFIAUALgC61P8LM,19279
+isa_model/inference/services/llm/base_llm_service.py,sha256=SXxFhH2z1UYqt2ihiuursrKEjiiiC3vWCvRQVETsdVc,5238
+isa_model/inference/services/llm/llm_adapter.py,sha256=Zb9-M7lqnryOVN18U5zvpy67B-jlks2ZlhFU0gcIUDM,17201
+isa_model/inference/services/llm/ollama_llm_service.py,sha256=c5xAdbbicskbxhivIQUse-QiUbCElR_Bo88wIA3dZUI,10194
+isa_model/inference/services/llm/openai_llm_service.py,sha256=e5GmO4YpVUBImQvQTZ26UwiAmHkU1RPl_vIa1dNe_NM,8148
 isa_model/inference/services/llm/triton_llm_service.py,sha256=ZFo7JoZ799Nvyi8Cz1jfWOa6TUn0hDRJtBrotadMAd4,17673
 isa_model/inference/services/ml/base_ml_service.py,sha256=mLBA6ENowa3KVzNqHyhWxf_Pr-cJJj84lDE4TniPzYI,2894
 isa_model/inference/services/ml/sklearn_ml_service.py,sha256=Lf9JrwvI25lca7JBbjB_e66eAUtXFbwxZ3Hs13dVGkA,5512
@@ -48,10 +53,10 @@ isa_model/inference/services/others/table_transformer_service.py,sha256=r74h6QUS
 isa_model/inference/services/vision/__init__.py,sha256=N9Zr7o2uQKoyUEvpmyOIgXPx9ivrix3gQ1OLoiQ7BLo,283
 isa_model/inference/services/vision/base_image_gen_service.py,sha256=XC0PWlH3LXMGhic57BjEucwXm1rU5_g3mbMoYQiEU5c,5410
 isa_model/inference/services/vision/base_vision_service.py,sha256=Yk2C9rD3zfORWCXSYTWPj5HB08A_eD1YiNIShF0_MjY,5418
-isa_model/inference/services/vision/ollama_vision_service.py,sha256=KE0D-Q75bTcxNcigo_wfPAtSHrzQzWNvN6Pcs2c_N-w,6495
-isa_model/inference/services/vision/openai_vision_service.py,sha256=5M182cV-wKCnV_U0CGWu4uFrggo--3YLD_0_FpNW9Ak,2920
-isa_model/inference/services/vision/replicate_image_gen_service.py,sha256=3jjZ1c7YVCT_Or212SI0zTGRJtmUOs1yWLs8jV8QgOA,12162
-isa_model/inference/services/vision/helpers/image_utils.py,sha256=hTZi4MLktETupPIbE-TXMSi1kix6h8UfLiyEIDt2rzA,1751
+isa_model/inference/services/vision/ollama_vision_service.py,sha256=Btm3jJmnSBcJDiTujr51eWC3a3eA_58xKMj5TsatXJQ,6821
+isa_model/inference/services/vision/openai_vision_service.py,sha256=dCtxng8hC1mpL7MzVlABzBsDcCzDI03exQpVkEPj15w,12894
+isa_model/inference/services/vision/replicate_image_gen_service.py,sha256=yrIdSgpNlXI5qJd0dkZ86Ngrbdr-_ZlKL-_2v1h1MAA,11768
+isa_model/inference/services/vision/helpers/image_utils.py,sha256=ieEL69LQ9-T4zsSFj2Mmt2jRUU_UOUAgt1W6Je9kaa8,1800
 isa_model/inference/services/vision/helpers/text_splitter.py,sha256=6AbvcQ7H6MS54B9d9T1XBGg4GhvmKfZqp00lKp9pF-U,1635
 isa_model/inference/utils/conversion/bge_rerank_convert.py,sha256=1dvtxe5-PPCe2Au6SO8F2XaD-xdIoeA4zDTcid2L9FU,2691
 isa_model/inference/utils/conversion/onnx_converter.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -80,7 +85,7 @@ isa_model/training/core/config.py,sha256=oqgKpBvtzrN6jwLIQYQ2707lH6nmjrktRiSxp9i
 isa_model/training/core/dataset.py,sha256=XCFsnf0NUMU1dJpdvo_CAMyvXB-9_RCUEiy8TU50e20,7802
 isa_model/training/core/trainer.py,sha256=h5TjqjdFr0Fsv5y4-0siy1KmOlqLfliVaUXybvuoeXU,26932
 isa_model/training/core/utils.py,sha256=Nik0M2ssfNbWqP6fKO0Kfyhzr_H6Q19ioxB-qCYbn5E,8387
-isa_model-0.3.0.dist-info/METADATA,sha256=vKAOkCdWjst6VFeisv1QxEHUEzxJpMOd5FO-RMG_C6M,12226
-isa_model-0.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-isa_model-0.3.0.dist-info/top_level.txt,sha256=eHSy_Xb3kNkh2kK11mi1mZh0Wz91AQ5b8k2KFYO-rE8,10
-isa_model-0.3.0.dist-info/RECORD,,
+isa_model-0.3.2.dist-info/METADATA,sha256=nzfl95_XbhGMH8eXAqOBsAp4qU_3QTGBVhfunEi6eXQ,12226
+isa_model-0.3.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+isa_model-0.3.2.dist-info/top_level.txt,sha256=eHSy_Xb3kNkh2kK11mi1mZh0Wz91AQ5b8k2KFYO-rE8,10
+isa_model-0.3.2.dist-info/RECORD,,