model-library 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. model_library/__init__.py +7 -3
  2. model_library/base/__init__.py +7 -0
  3. model_library/{base.py → base/base.py} +47 -423
  4. model_library/base/batch.py +121 -0
  5. model_library/base/delegate_only.py +94 -0
  6. model_library/base/input.py +100 -0
  7. model_library/base/output.py +175 -0
  8. model_library/base/utils.py +42 -0
  9. model_library/config/all_models.json +164 -2
  10. model_library/config/anthropic_models.yaml +4 -0
  11. model_library/config/deepseek_models.yaml +3 -1
  12. model_library/config/openai_models.yaml +48 -0
  13. model_library/exceptions.py +2 -0
  14. model_library/logging.py +30 -0
  15. model_library/providers/__init__.py +0 -0
  16. model_library/providers/ai21labs.py +2 -0
  17. model_library/providers/alibaba.py +16 -78
  18. model_library/providers/amazon.py +3 -0
  19. model_library/providers/anthropic.py +213 -2
  20. model_library/providers/azure.py +2 -0
  21. model_library/providers/cohere.py +14 -80
  22. model_library/providers/deepseek.py +14 -90
  23. model_library/providers/fireworks.py +17 -81
  24. model_library/providers/google/google.py +22 -20
  25. model_library/providers/inception.py +15 -83
  26. model_library/providers/kimi.py +15 -83
  27. model_library/providers/mistral.py +2 -0
  28. model_library/providers/openai.py +2 -0
  29. model_library/providers/perplexity.py +12 -79
  30. model_library/providers/together.py +2 -0
  31. model_library/providers/vals.py +2 -0
  32. model_library/providers/xai.py +2 -0
  33. model_library/providers/zai.py +15 -83
  34. model_library/register_models.py +75 -55
  35. model_library/registry_utils.py +5 -5
  36. model_library/utils.py +3 -28
  37. {model_library-0.1.0.dist-info → model_library-0.1.2.dist-info}/METADATA +36 -7
  38. model_library-0.1.2.dist-info/RECORD +61 -0
  39. model_library-0.1.0.dist-info/RECORD +0 -53
  40. {model_library-0.1.0.dist-info → model_library-0.1.2.dist-info}/WHEEL +0 -0
  41. {model_library-0.1.0.dist-info → model_library-0.1.2.dist-info}/licenses/LICENSE +0 -0
  42. {model_library-0.1.0.dist-info → model_library-0.1.2.dist-info}/top_level.txt +0 -0
model_library/providers/together.py CHANGED
@@ -31,9 +31,11 @@ from model_library.exceptions import (
 from model_library.file_utils import trim_images
 from model_library.model_utils import get_reasoning_in_tag
 from model_library.providers.openai import OpenAIModel
+from model_library.register_models import register_provider
 from model_library.utils import create_openai_client_with_defaults


+@register_provider("together")
 class TogetherModel(LLM):
     _client: AsyncTogether | None = None

model_library/providers/vals.py CHANGED
@@ -27,6 +27,7 @@ from model_library.base import (
     TextInput,
     ToolDefinition,
 )
+from model_library.register_models import register_provider
 from model_library.utils import truncate_str

 FAIL_RATE = 0.1
@@ -145,6 +146,7 @@ class DummyAIBatchMixin(LLMBatchMixin):
         return batch_status == "failed"


+@register_provider("vals")
 class DummyAIModel(LLM):
     _client: Redis | None = None

model_library/providers/xai.py CHANGED
@@ -39,6 +39,7 @@ from model_library.exceptions import (
     RateLimitException,
 )
 from model_library.providers.openai import OpenAIModel
+from model_library.register_models import register_provider
 from model_library.utils import create_openai_client_with_defaults

 Chat = AsyncChat | SyncChat
@@ -48,6 +49,7 @@ class XAIConfig(ProviderConfig):
     sync_client: bool = False


+@register_provider("grok")
 class XAIModel(LLM):
     provider_config = XAIConfig()

model_library/providers/zai.py CHANGED
@@ -1,27 +1,17 @@
-import io
-from typing import Any, Literal, Sequence
-
-from typing_extensions import override
+from typing import Literal

 from model_library import model_library_settings
 from model_library.base import (
-    LLM,
-    FileInput,
-    FileWithId,
-    InputItem,
+    DelegateOnly,
     LLMConfig,
-    QueryResult,
-    ToolDefinition,
 )
 from model_library.providers.openai import OpenAIModel
+from model_library.register_models import register_provider
 from model_library.utils import create_openai_client_with_defaults


-class ZAIModel(LLM):
-    @override
-    def get_client(self) -> None:
-        raise NotImplementedError("Not implemented")
-
+@register_provider("zai")
+class ZAIModel(DelegateOnly):
     def __init__(
         self,
         model_name: str,
@@ -30,73 +20,15 @@ class ZAIModel(LLM):
         config: LLMConfig | None = None,
     ):
         super().__init__(model_name, provider, config=config)
-        self.model_name: str = model_name
-        self.native: bool = False

-        # https://docs.z.ai/
-        self.delegate: OpenAIModel | None = (
-            None
-            if self.native
-            else OpenAIModel(
-                model_name=self.model_name,
-                provider=provider,
-                config=config,
-                custom_client=create_openai_client_with_defaults(
-                    api_key=model_library_settings.ZAI_API_KEY,
-                    base_url="https://open.bigmodel.cn/api/paas/v4/",
-                ),
-                use_completions=True,
-            )
+        # https://docs.z.ai/guides/develop/openai/python
+        self.delegate = OpenAIModel(
+            model_name=self.model_name,
+            provider=self.provider,
+            config=config,
+            custom_client=create_openai_client_with_defaults(
+                api_key=model_library_settings.ZAI_API_KEY,
+                base_url="https://open.bigmodel.cn/api/paas/v4/",
+            ),
+            use_completions=True,
         )
-
-    @override
-    async def parse_input(
-        self,
-        input: Sequence[InputItem],
-        **kwargs: Any,
-    ) -> Any:
-        raise NotImplementedError()
-
-    @override
-    async def parse_image(
-        self,
-        image: FileInput,
-    ) -> Any:
-        raise NotImplementedError()
-
-    @override
-    async def parse_file(
-        self,
-        file: FileInput,
-    ) -> Any:
-        raise NotImplementedError()
-
-    @override
-    async def parse_tools(
-        self,
-        tools: list[ToolDefinition],
-    ) -> Any:
-        raise NotImplementedError()
-
-    @override
-    async def upload_file(
-        self,
-        name: str,
-        mime: str,
-        bytes: io.BytesIO,
-        type: Literal["image", "file"] = "file",
-    ) -> FileWithId:
-        raise NotImplementedError()
-
-    @override
-    async def _query_impl(
-        self,
-        input: Sequence[InputItem],
-        *,
-        tools: list[ToolDefinition],
-        **kwargs: object,
-    ) -> QueryResult:
-        # relies on oAI delegate
-        if self.delegate:
-            return await self.delegate_query(input, tools=tools, **kwargs)
-        raise NotImplementedError()
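Several other providers shrink by a similar amount in this release (alibaba, cohere, deepseek, fireworks, inception, kimi, perplexity), which suggests the same refactor onto the new DelegateOnly base added in model_library/base/delegate_only.py: the subclass only builds an OpenAI-compatible delegate in __init__ and forwards everything else. Below is a hedged sketch of a hypothetical provider mirroring the ZAIModel hunk above; the provider name, settings attribute, and base URL are placeholders, not part of the package.

```python
# Sketch only: a hypothetical DelegateOnly provider mirroring the ZAIModel hunk above.
from model_library import model_library_settings
from model_library.base import DelegateOnly, LLMConfig
from model_library.providers.openai import OpenAIModel
from model_library.register_models import register_provider
from model_library.utils import create_openai_client_with_defaults


@register_provider("acme")  # placeholder provider name
class AcmeModel(DelegateOnly):  # placeholder class for illustration
    def __init__(
        self,
        model_name: str,
        provider: str,
        config: LLMConfig | None = None,
    ):
        super().__init__(model_name, provider, config=config)

        # Route every query through the provider's OpenAI-compatible endpoint.
        self.delegate = OpenAIModel(
            model_name=self.model_name,
            provider=self.provider,
            config=config,
            custom_client=create_openai_client_with_defaults(
                api_key=model_library_settings.ACME_API_KEY,  # placeholder setting
                base_url="https://api.example.com/v1/",  # placeholder URL
            ),
            use_completions=True,
        )
```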
model_library/register_models.py CHANGED
@@ -1,61 +1,23 @@
+import importlib
+import pkgutil
 import threading
 from copy import deepcopy
 from datetime import date
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, cast, get_type_hints
+from typing import Any, Callable, Type, TypeVar, cast, get_type_hints

 import yaml
 from pydantic import create_model, model_validator
 from pydantic.fields import Field
 from pydantic.main import BaseModel

+from model_library import providers
 from model_library.base import LLM, ProviderConfig
-from model_library.providers.ai21labs import AI21LabsModel
-from model_library.providers.alibaba import AlibabaModel
-from model_library.providers.amazon import AmazonModel
-from model_library.providers.anthropic import AnthropicModel
-from model_library.providers.azure import AzureOpenAIModel
-from model_library.providers.cohere import CohereModel
-from model_library.providers.deepseek import DeepSeekModel
-from model_library.providers.fireworks import FireworksModel
-from model_library.providers.google.google import GoogleModel
-from model_library.providers.inception import MercuryModel
-from model_library.providers.kimi import KimiModel
-from model_library.providers.mistral import MistralModel
-from model_library.providers.openai import OpenAIModel
-from model_library.providers.perplexity import PerplexityModel
-from model_library.providers.together import TogetherModel
-from model_library.providers.vals import DummyAIModel
-from model_library.providers.xai import XAIModel
-from model_library.providers.zai import ZAIModel
 from model_library.utils import get_logger

-MAPPING_PROVIDERS: dict[str, type[LLM]] = {
-    "openai": OpenAIModel,
-    "azure": AzureOpenAIModel,
-    "anthropic": AnthropicModel,
-    "together": TogetherModel,
-    "mistralai": MistralModel,
-    "grok": XAIModel,
-    "fireworks": FireworksModel,
-    "ai21labs": AI21LabsModel,
-    "amazon": AmazonModel,
-    "bedrock": AmazonModel,
-    "cohere": CohereModel,
-    "google": GoogleModel,
-    "vals": DummyAIModel,
-    "alibaba": AlibabaModel,
-    "perplexity": PerplexityModel,
-    "deepseek": DeepSeekModel,
-    "zai": ZAIModel,
-    "kimi": KimiModel,
-    "inception": MercuryModel,
-}
-
-logger = get_logger(__name__)
-# Folder containing provider YAMLs
-path_library = Path(__file__).parent / "config"
+T = TypeVar("T", bound=LLM)

+logger = get_logger("register_models")

 """
 Model Registry structure
@@ -174,6 +136,7 @@ class ClassProperties(BaseModel):
     Each provider can have a set of provider-specific properties, we however want to accept
     any possible property from a provider in the yaml, and validate later. So we join all
     provider-specific properties into a single class.
+    This has no effect on runtime use of ProviderConfig, only used to load the yaml
     """


@@ -210,14 +173,6 @@ def get_dynamic_provider_properties_model() -> type[BaseProviderProperties]:
     )


-ProviderProperties = get_dynamic_provider_properties_model()
-
-if TYPE_CHECKING:
-    ProviderPropertiesType = BaseProviderProperties
-else:
-    ProviderPropertiesType = ProviderProperties
-
-
 class DefaultParameters(BaseModel):
     max_output_tokens: int | None = None
     temperature: float | None = None
@@ -234,13 +189,20 @@ class RawModelConfig(BaseModel):
     documentation_url: str | None = None
     properties: Properties = Field(default_factory=Properties)
     class_properties: ClassProperties = Field(default_factory=ClassProperties)
-    provider_properties: ProviderPropertiesType = Field(
-        default_factory=ProviderProperties
-    )
+    provider_properties: BaseProviderProperties | None = None
     costs_per_million_token: CostProperties = Field(default_factory=CostProperties)
     alternative_keys: list[str | dict[str, Any]] = Field(default_factory=list)
     default_parameters: DefaultParameters = Field(default_factory=DefaultParameters)

+    def model_dump(self, *args: object, **kwargs: object):
+        data = super().model_dump(*args, **kwargs)
+        if self.provider_properties is not None:
+            # explicitly dump dynamic ProviderProperties instance
+            data["provider_properties"] = self.provider_properties.model_dump(
+                *args, **kwargs
+            )
+        return data
+

 class ModelConfig(RawModelConfig):
     # post processing fields
@@ -252,6 +214,9 @@ class ModelConfig(RawModelConfig):

 ModelRegistry = dict[str, ModelConfig]

+# Folder containing provider YAMLs
+path_library = Path(__file__).parent / "config"
+

 def deep_update(
     base: dict[str, Any], updates: dict[str, str | dict[str, Any]]
@@ -270,6 +235,9 @@ def _register_models() -> ModelRegistry:

     registry: ModelRegistry = {}

+    # generate ProviderProperties class
+    ProviderProperties = get_dynamic_provider_properties_model()
+
     # load each provider YAML
     sections = Path(path_library).glob("*.yaml")
     sections = sorted(sections, key=lambda x: "openai" in x.name.lower())
@@ -325,6 +293,10 @@ def _register_models() -> ModelRegistry:
                 "slug": model_name.replace("/", "_"),
             }
         )
+        # load provider properties separately since the model was generated at runtime
+        model_obj.provider_properties = ProviderProperties.model_validate(
+            current_model_config.get("provider_properties", {})
+        )

         registry[model_name] = model_obj

@@ -371,6 +343,50 @@ def _register_models() -> ModelRegistry:
     return registry


+_provider_registry: dict[str, type[LLM]] = {}
+_provider_registry_lock = threading.Lock()
+_imported_providers = False
+
+
+def register_provider(name: str) -> Callable[[Type[T]], Type[T]]:
+    def decorator(cls: Type[T]) -> Type[T]:
+        logger.debug(f"Registering provider {name}")
+
+        if name in _provider_registry:
+            raise ValueError(f"Provider {name} is already registered.")
+        _provider_registry[name] = cls
+        return cls
+
+    return decorator
+
+
+def _import_all_providers():
+    """Import all provider modules. Any class with @register_provider will be automatically registered upon import"""
+
+    package_name = providers.__name__
+
+    # walk all submodules recursively
+    for _, module_name, _ in pkgutil.walk_packages(
+        providers.__path__, package_name + "."
+    ):
+        # skip private modules
+        if module_name.split(".")[-1].startswith("_"):
+            continue
+        importlib.import_module(module_name)
+
+
+def get_provider_registry() -> dict[str, type[LLM]]:
+    """Return the provider registry, lazily loading all modules on first call."""
+    global _imported_providers
+    if not _imported_providers:
+        with _provider_registry_lock:
+            if not _imported_providers:
+                _import_all_providers()
+                _imported_providers = True
+
+    return _provider_registry
+
+
 _model_registry: ModelRegistry | None = None
 _model_registry_lock = threading.Lock()

@@ -381,5 +397,9 @@ def get_model_registry() -> ModelRegistry:
     if _model_registry is None:
         with _model_registry_lock:
             if _model_registry is None:
+                # initialize provider registry
+                global get_provider_registry
+                get_provider_registry()
+
                 _model_registry = _register_models()
     return _model_registry
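Taken together, these hunks swap the hard-coded MAPPING_PROVIDERS dict for a decorator-driven registry that imports provider modules lazily. A minimal sketch of the new pattern, assuming the package and its provider dependencies are installed; the "example" provider name and ExampleModel class below are hypothetical, for illustration only.

```python
# Sketch only: illustrates the decorator-based provider registry introduced above.
from model_library.base import LLM
from model_library.register_models import get_provider_registry, register_provider


@register_provider("example")  # hypothetical provider name, not shipped in the package
class ExampleModel(LLM):  # hypothetical subclass; real providers implement LLM's interface
    ...


# The first call walks model_library.providers with pkgutil and imports each submodule,
# so every class decorated with @register_provider ends up in the mapping.
registry = get_provider_registry()
assert registry["example"] is ExampleModel
```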
model_library/registry_utils.py CHANGED
@@ -5,10 +5,10 @@ import tiktoken

 from model_library.base import LLM, LLMConfig, ProviderConfig
 from model_library.register_models import (
-    MAPPING_PROVIDERS,
     CostProperties,
     ModelConfig,
     get_model_registry,
+    get_provider_registry,
 )

 ALL_MODELS_PATH = Path(__file__).parent / "config" / "all_models.json"
@@ -51,7 +51,7 @@ def create_config(

     # load provider config with correct type
     if provider_properties:
-        ModelClass: type[LLM] = MAPPING_PROVIDERS[registry_config.provider_name]
+        ModelClass: type[LLM] = get_provider_registry()[registry_config.provider_name]
         if hasattr(ModelClass, "provider_config"):
             ProviderConfigClass: type[ProviderConfig] = type(ModelClass.provider_config)  # type: ignore
             provider_config: ProviderConfig = ProviderConfigClass.model_validate(
@@ -89,7 +89,7 @@ def _get_model_from_registry(

     provider_name: str = registry_config.provider_name
     provider_endpoint: str = registry_config.provider_endpoint
-    ModelClass: type[LLM] = MAPPING_PROVIDERS[provider_name]
+    ModelClass: type[LLM] = get_provider_registry()[provider_name]

     return ModelClass(
         model_name=provider_endpoint,
@@ -115,7 +115,7 @@ def get_registry_model(model_str: str, override_config: LLMConfig | None = None)
 def get_raw_model(model_str: str, config: LLMConfig | None = None) -> LLM:
     """Get a model exluding default config"""
     provider, model_name = model_str.split("/", 1)
-    ModelClass = MAPPING_PROVIDERS[provider]
+    ModelClass = get_provider_registry()[provider]
     return ModelClass(model_name=model_name, provider=provider, config=config)


@@ -130,7 +130,7 @@ def get_model_cost(model_str: str) -> CostProperties | None:
 @cache
 def get_provider_names() -> list[str]:
     """Return all provider names in the registry"""
-    return sorted([provider_name for provider_name in MAPPING_PROVIDERS.keys()])
+    return sorted([provider_name for provider_name in get_provider_registry().keys()])


 @cache
model_library/utils.py CHANGED
@@ -1,4 +1,3 @@
-import inspect
 import logging
 from collections.abc import Mapping, Sequence
 from typing import Any
@@ -8,6 +7,7 @@ from openai import AsyncOpenAI
 from pydantic.main import BaseModel

 MAX_LLM_LOG_LENGTH = 100
+logger = logging.getLogger("llm")


 def truncate_str(s: str | None, max_len: int = MAX_LLM_LOG_LENGTH) -> str:
@@ -21,20 +21,8 @@ def truncate_str(s: str | None, max_len: int = MAX_LLM_LOG_LENGTH) -> str:

 def get_logger(name: str | None = None):
     if not name:
-        caller = inspect.stack()[1]
-        module = inspect.getmodule(caller[0])
-        name = module.__name__ if module else "__main__"
-
-    logger = logging.getLogger(name)
-    if not logger.handlers:
-        logger.setLevel(logging.DEBUG)
-        handler = logging.StreamHandler()
-        formatter = logging.Formatter(
-            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-        )
-        handler.setFormatter(formatter)
-        logger.addHandler(handler)
-    return logger
+        return logger
+    return logging.getLogger(f"{logger.name}.{name}")


 def deep_model_dump(obj: object) -> object:
@@ -75,17 +63,6 @@ def create_openai_client_with_defaults(
     )


-def sum_optional(a: int | None, b: int | None) -> int | None:
-    """Sum two optional integers, returning None if both are None.
-
-    Preserves None to indicate "unknown/not provided" when both inputs are None,
-    otherwise treats None as 0 for summation.
-    """
-    if a is None and b is None:
-        return None
-    return (a or 0) + (b or 0)
-
-
 def get_context_window_for_model(model_name: str, default: int = 128_000) -> int:
     """
     Get the context window for a model by looking up its configuration from the registry.
@@ -99,9 +76,7 @@ def get_context_window_for_model(model_name: str, default: int = 128_000) -> int
     """
     # import here to avoid circular imports
     from model_library.register_models import get_model_registry
-    from model_library.utils import get_logger

-    logger = get_logger(__name__)
     model_config = get_model_registry().get(model_name, None)
     if (
         model_config
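The logging change above gives the library a single "llm" root logger, with get_logger(name) now returning child loggers underneath it; the handler and formatter setup that used to live in get_logger is gone, presumably moving to the new model_library/logging.py. A small sketch of the resulting behavior, using only the standard library plus the names visible in this diff (the child logger name is illustrative):

```python
# Sketch: how the reworked get_logger hangs every library logger off the "llm" root.
import logging

from model_library.utils import get_logger

log = get_logger("providers.openai")  # hypothetical child name, for illustration
print(log.name)  # -> "llm.providers.openai"

# One call on the root now configures or silences the whole library:
logging.getLogger("llm").setLevel(logging.WARNING)
```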
{model_library-0.1.0.dist-info → model_library-0.1.2.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: model-library
-Version: 0.1.0
+Version: 0.1.2
 Summary: Model Library for vals.ai
 Author-email: "Vals AI, Inc." <contact@vals.ai>
 License: MIT
@@ -10,6 +10,7 @@ License-File: LICENSE
 Requires-Dist: typing-extensions<5.0,>=4.14.1
 Requires-Dist: pydantic<3.0,>=2.11.7
 Requires-Dist: pyyaml>=6.0.2
+Requires-Dist: rich
 Requires-Dist: backoff<3.0,>=2.2.1
 Requires-Dist: redis<7.0,>=6.2.0
 Requires-Dist: tiktoken==0.11.0
@@ -53,7 +54,14 @@ Open-source model library for interacting with a variety of LLM providers. Origi
 - X AI
 - ZhipuAI (zai)

-Run `python -m scripts.browse_models` to browse the model registry.
+Run `python -m scripts.browse_models` to browse the model registry or
+
+```python
+from model_library.registry_utils import get_model_names_by_provider, get_provider_names
+
+print(get_provider_names())
+print(get_model_names_by_provider("chosen-provider"))
+```

 ### Supported Input

@@ -70,16 +78,26 @@ Here is a basic example of how to query a model:

 ```python
 import asyncio
-from model_library.registry_utils import get_registry_model
+from model_library import model

 async def main():
     # Load a model from the registry
-    model = get_registry_model("openai/gemini-2.5-flash")
+    llm = model("anthropic/claude-opus-4-1-20250805-thinking")
+
+    # Display the LLM instance
+    llm.logger.info(llm)
+    # or print(llm)

     # Query the model with a simple text input
-    response = await model.query("What is QSBS? Explain your thinking in detail and make it concise.")
+    result = await llm.query(
+        "What is QSBS? Explain your thinking in detail and make it concise."
+    )
+
+    # Logger automatically logs the result
+
+    # Display only the output text
+    llm.logger.info(result.output_text)

-    # Logger automatically logs the response

 if __name__ == "__main__":
     asyncio.run(main())
@@ -88,7 +106,18 @@ if __name__ == "__main__":
 The model registry holds model attributes, ex. reasoning, file support, tool support, max tokens. You may also use models not included in the registry.

 ```python
-model = get_raw_model("openai/gpt-3.5-turbo", config=LLMConfig(max_tokens=1000))
+from model_library import raw_model
+from model_library.base import LLMConfig
+
+model = raw_model("grok/grok-code-fast", LLMConfig(max_tokens=10000))
+```
+
+Root logger is named "llm". To disable logging:
+
+```python
+from model_library import set_logging
+
+set_logging(enable=False)
 ```

 ### Environment Setup
model_library-0.1.2.dist-info/RECORD ADDED
@@ -0,0 +1,61 @@
+model_library/__init__.py,sha256=AKc_15aklOf-LbcS9z1Xer_moRWNpG6Dh3kqvSQ0nOI,714
+model_library/exceptions.py,sha256=T_CEX6STyGPMFFAz4kXZg7fv6YfvPi8UJjRSeogP1fk,8845
+model_library/file_utils.py,sha256=vLxYWI0-kwp67UONcFdZw2qDTV38P7IZLBaXFJDNtO4,3666
+model_library/logging.py,sha256=McyaPHUk7RkB38-LrfnudrrU1B62ta8wAbbIBwLRmj0,853
+model_library/model_utils.py,sha256=l8oCltGeimMGtnne_3Q1EguVtzCj61UMsLsma-1czwg,753
+model_library/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+model_library/register_models.py,sha256=kAttwWA4tpb8WPnJSoANpQ0sa1ERWWgGx1EQcuuCaHI,13863
+model_library/registry_utils.py,sha256=Op1NnQGD0XZyP032pGJdWdHgdEoHwKOcoWNCihT69SE,6977
+model_library/settings.py,sha256=QyeUqzWBpexFi014L_mZkoXP49no3SAQNJRObATXrL8,873
+model_library/utils.py,sha256=jQqBbP9vafpuFxp7kb53XYvCAtW79FtFJelnGGPn-pQ,4011
+model_library/base/__init__.py,sha256=TtxCXGUtkEqWZNMMofLPuC4orN7Ja2hemtbtHitt_UA,266
+model_library/base/base.py,sha256=gatrvBdSt2z2wOncgi4FtlPqvlOqmVfkRTh2eZcqgKk,13937
+model_library/base/batch.py,sha256=-jd6L0ECc5pkj73zoX2ZYcv_9iQdqxEi1kEilwaXWSA,2895
+model_library/base/delegate_only.py,sha256=V2MzENtvBg0pySKncgE-mfCLBhhRZ0y4BntQwQsxbqU,2111
+model_library/base/input.py,sha256=Nhg8Ril1kFau1DnE8u102JC1l-vxNd-v9e3SjovR-Do,1876
+model_library/base/output.py,sha256=5DG077lU-CNXVVfJAELaR4gJh0_bw3V5MwbgZIQphiY,5504
+model_library/base/utils.py,sha256=6okObx8VJc7xKmPR-tBFLTWFbnZTNhTdCPbuoV-Mef8,1246
+model_library/config/ai21labs_models.yaml,sha256=su8YrHwLTVfIvRrk4AFaLi_Xg1BU_-g8AK1ExKZUXSk,2547
+model_library/config/alibaba_models.yaml,sha256=2tIdj_7Qiw_-jZmutdtdJWyAXPl4WEFh1ij3ubymov0,2252
+model_library/config/all_models.json,sha256=8XKNQbk0Zgmmkfio1kRDYs3MhFmaRQsVS30c0EENB4Y,488427
+model_library/config/amazon_models.yaml,sha256=RGj7DH0IzXNs4JmAk6adC2jUEefVxBJNVQIB-n-fXbc,8988
+model_library/config/anthropic_models.yaml,sha256=VHfmm2mRjKa0Ieusk0N4tRT_oy5BjIoNy1BkZJpEcnU,10490
+model_library/config/cohere_models.yaml,sha256=BQSqIsGUXvULeFLGDFJNBxen-6CC5hKNW2s4lSC8Np0,5186
+model_library/config/deepseek_models.yaml,sha256=42bfyNZtaiOYx188FMqkfuCBSLNM1EBTYzf7hwzsjHw,1318
+model_library/config/dummy_model.yaml,sha256=CTnSdFYC-KTsHt5TvS0echWwUlb_H3eATZL1tVIkXkM,915
+model_library/config/fireworks_models.yaml,sha256=VG_Fo8X6qSA3VALn1SKehjj3B1HP0XO1fCYDdaCUdnM,5905
+model_library/config/google_models.yaml,sha256=QE9QjBrVlRDvuQ5tZ041VktcMJVGF7aAcYEJoBsimJQ,15661
+model_library/config/inception_models.yaml,sha256=g6PC0qjEC2SUPTo2Rad34Dl8dt8ZBv1svaaP2_PIrYg,660
+model_library/config/kimi_models.yaml,sha256=BySTLTc0m24oBC94VegosQgxpHglthe5dGRwF-fyduo,840
+model_library/config/mistral_models.yaml,sha256=MjKEYFYcGBsFd6iXekE_1oGa3CmEWAVPABJR94gV6SE,3839
+model_library/config/openai_models.yaml,sha256=6ZpqIoSGbR-K0JQzOq8kjnQT-I78ap7psRpj_D-iUas,25283
+model_library/config/perplexity_models.yaml,sha256=avTBrwnG-5Y6kle9t9vBrwcImhSzw-dgoYQuaw7K7Rs,2962
+model_library/config/together_models.yaml,sha256=9FtYtEyPFiuInfamuncg5b7PjSh-tc6k1448QulEIg4,24422
+model_library/config/xai_models.yaml,sha256=iNVTlpBXHF__KkvAQHgoyDPWrDvyIcOFHZOF8crAMVM,7798
+model_library/config/zai_models.yaml,sha256=lyAUPp4qOxkAAKCcbX48IKLaYYPAkp-Jn1wyCjLqmeA,1396
+model_library/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+model_library/providers/ai21labs.py,sha256=7PnXKl-Fv8KlE95eBv2izbFg1u7utDRQPdWXYVl_-as,5832
+model_library/providers/alibaba.py,sha256=k6LZErV_l9oTFTdKTwyw1SXD509Rl3AqFbN8umCryEE,2941
+model_library/providers/amazon.py,sha256=Qd5zAEn71WVo6c8mpPW0gE7WHLarzKzh6r4b_R4FaYk,13137
+model_library/providers/anthropic.py,sha256=iEQ5qRctp3iR_i4FZMWjKOIIifN2aGgW10j7Q8H4LhQ,22237
+model_library/providers/azure.py,sha256=brQNCED-zHvYjL5K5hdjFBNso6hJZg0HTHNnAgJPPG0,1408
+model_library/providers/cohere.py,sha256=lCBm1PP1l_UOa1pKFMIZM3C0wCv3QWB6UP0-jvjkFa4,1066
+model_library/providers/deepseek.py,sha256=7T4lxDiV5wmWUK7TAKwr332_T6uyXNCOiirZOCCETL0,1159
+model_library/providers/fireworks.py,sha256=w-5mOF5oNzqx_0ijCoTm1lSn2ZHwhp6fURKhV3LEqIc,2309
+model_library/providers/inception.py,sha256=Nrky53iujIM9spAWoNRtoJg2inFiL0li6E75vT3b6V8,1107
+model_library/providers/kimi.py,sha256=zzvcKpZLsM1xPebpLeMxNKTt_FRiLN1rFWrIly7wfXA,1092
+model_library/providers/mistral.py,sha256=DHl0BYUZOrCvD4La5cyzcpQKHh4RbTbgMORWFbU_TuQ,9536
+model_library/providers/openai.py,sha256=2hEJIgv5HnfrNhs_E3xzDpjV64QATJxu6R0R8JMMAV0,33447
+model_library/providers/perplexity.py,sha256=eIzzkaZ4ZMlRKFVI9bnwyo91iJkh7aEmJ-0_4OKeAWc,1083
+model_library/providers/together.py,sha256=ElE9k2H6kkMiK23yxz5-Czg6sOKDy80uyKAI1_oPM_4,8178
+model_library/providers/vals.py,sha256=VLF1rsCR13a_kmtZfboDzJJ64Io_tBFe60vf-0BdYPc,9830
+model_library/providers/xai.py,sha256=oJiMICYLkybHpLv77PmMbi1Xj9IUZmKX3kANksjjFEQ,10828
+model_library/providers/zai.py,sha256=O_GM6KlJ0fM2wYoxO9xrCWfnpYH7IpoKEzjiD4jB8Kc,1050
+model_library/providers/google/__init__.py,sha256=ypuLVL_QJEQ7C3S47FhC9y4wyawYOdGikAViJmACI0U,115
+model_library/providers/google/batch.py,sha256=4TE90Uo1adi54dVtGcGyUAxw11YExJq-Y4KmkQ-cyHA,9978
+model_library/providers/google/google.py,sha256=WKuQr4C-ZVcK6ew50YEHgcPA91pEafFXSI7cNi6nRlQ,16436
+model_library-0.1.2.dist-info/licenses/LICENSE,sha256=x6mf4o7U_wHaaqcfxoU-0R6uYJLbqL_TNuoULP3asaA,1070
+model_library-0.1.2.dist-info/METADATA,sha256=dEDbkl76pIIbzBEZ6qtTXMUxr3sX_-AO9vC76y8OSho,7034
+model_library-0.1.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+model_library-0.1.2.dist-info/top_level.txt,sha256=HtQYxA_7RP8UT35I6VcUw20L6edI0Zf2t5Ys1uDGVjs,14
+model_library-0.1.2.dist-info/RECORD,,
model_library-0.1.0.dist-info/RECORD DELETED
@@ -1,53 +0,0 @@
-model_library/__init__.py,sha256=9EDDz6UKJG21p7mrak6Rxzr-DUMOwCgE8-yAWP1-W_s,652
-model_library/base.py,sha256=JRzDZkYhzlEarknC0gX0sRplfxoFaqSb47gYhQy57sA,23834
-model_library/exceptions.py,sha256=FnEQXTeC1GpcMEpwukGK7Uwu1_Bnvl87MUs1zOqVm0o,8750
-model_library/file_utils.py,sha256=vLxYWI0-kwp67UONcFdZw2qDTV38P7IZLBaXFJDNtO4,3666
-model_library/model_utils.py,sha256=l8oCltGeimMGtnne_3Q1EguVtzCj61UMsLsma-1czwg,753
-model_library/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-model_library/register_models.py,sha256=JOSt_VdtsllIshRGKVzKWG9DT7cFHd9JsjZLysi7cUw,13267
-model_library/registry_utils.py,sha256=B9lnxkhdobNlkgP0a-QzgiK6W_YHFF7qgkMMerEZcL0,6949
-model_library/settings.py,sha256=QyeUqzWBpexFi014L_mZkoXP49no3SAQNJRObATXrL8,873
-model_library/utils.py,sha256=Ybcsf_HuKrYzOpf5dvz_gHo0HVjnunOK2BWHb51bo5U,4842
-model_library/config/ai21labs_models.yaml,sha256=su8YrHwLTVfIvRrk4AFaLi_Xg1BU_-g8AK1ExKZUXSk,2547
-model_library/config/alibaba_models.yaml,sha256=2tIdj_7Qiw_-jZmutdtdJWyAXPl4WEFh1ij3ubymov0,2252
-model_library/config/all_models.json,sha256=ol5pkto4gJ8ncvXx6RE25mHZp3j7ILGssLVvaFfuTNg,482811
-model_library/config/amazon_models.yaml,sha256=RGj7DH0IzXNs4JmAk6adC2jUEefVxBJNVQIB-n-fXbc,8988
-model_library/config/anthropic_models.yaml,sha256=QY1273cboa_Tuaxkjya5ne4YwA6j2yZShDPl3M4KqrM,10416
-model_library/config/cohere_models.yaml,sha256=BQSqIsGUXvULeFLGDFJNBxen-6CC5hKNW2s4lSC8Np0,5186
-model_library/config/deepseek_models.yaml,sha256=TytG4HnrWd4Q1KiNnEG2ZBX5WDzVv_tYRB51GzUD6qk,1274
-model_library/config/dummy_model.yaml,sha256=CTnSdFYC-KTsHt5TvS0echWwUlb_H3eATZL1tVIkXkM,915
-model_library/config/fireworks_models.yaml,sha256=VG_Fo8X6qSA3VALn1SKehjj3B1HP0XO1fCYDdaCUdnM,5905
-model_library/config/google_models.yaml,sha256=QE9QjBrVlRDvuQ5tZ041VktcMJVGF7aAcYEJoBsimJQ,15661
-model_library/config/inception_models.yaml,sha256=g6PC0qjEC2SUPTo2Rad34Dl8dt8ZBv1svaaP2_PIrYg,660
-model_library/config/kimi_models.yaml,sha256=BySTLTc0m24oBC94VegosQgxpHglthe5dGRwF-fyduo,840
-model_library/config/mistral_models.yaml,sha256=MjKEYFYcGBsFd6iXekE_1oGa3CmEWAVPABJR94gV6SE,3839
-model_library/config/openai_models.yaml,sha256=SKGifDmdAz2NP3FES9-fMKIreBBk_DvQPy5x5WgPWq4,23909
-model_library/config/perplexity_models.yaml,sha256=avTBrwnG-5Y6kle9t9vBrwcImhSzw-dgoYQuaw7K7Rs,2962
-model_library/config/together_models.yaml,sha256=9FtYtEyPFiuInfamuncg5b7PjSh-tc6k1448QulEIg4,24422
-model_library/config/xai_models.yaml,sha256=iNVTlpBXHF__KkvAQHgoyDPWrDvyIcOFHZOF8crAMVM,7798
-model_library/config/zai_models.yaml,sha256=lyAUPp4qOxkAAKCcbX48IKLaYYPAkp-Jn1wyCjLqmeA,1396
-model_library/providers/ai21labs.py,sha256=ubkC0dPTSeZXzPqn6n9H3B60bTBjEIGgzXIXMqIh1Y0,5741
-model_library/providers/alibaba.py,sha256=tuXMd8_YxoJ9mcRA__GZB-Gw1xWMLPNRWADvsbrqC5w,4221
-model_library/providers/amazon.py,sha256=_oKTEA9y-2gB0axrBMFEZ-yD9WRK_bqI3fcTEtkGubA,13018
-model_library/providers/anthropic.py,sha256=Rvg-9ot5DAY8GZ47JMxLMI6M_DxJMp1ZGIJVPYxLAp0,14269
-model_library/providers/azure.py,sha256=y4y4-5v8js1SJCqbDZkjZzPW6i9zGVbt5wt1vwdxpLI,1320
-model_library/providers/cohere.py,sha256=-MA3xJJUSA5N3f35zPrMzTyFSPV4fWXbnl68YaEgCXk,2484
-model_library/providers/deepseek.py,sha256=Mu803nDHWWscHfbu480SJPcn3wj34glve2YSCRkiBqM,3069
-model_library/providers/fireworks.py,sha256=10VHV8CMv4gGn6dnQjsFWFOeiwIj0mjVh0SIepnNVmI,3652
-model_library/providers/inception.py,sha256=FcLETPzRRTbYKCcWxe66amx_DRGYhX7nU6HeJ6GocLQ,2567
-model_library/providers/kimi.py,sha256=3yWy6zSUsxAZWtqJltQp2ohanBQoj7eyZkZHGTpT2k8,2532
-model_library/providers/mistral.py,sha256=MeHi9H2CsFU6VlxxY2v942-QOpdHuLI5lpqC_JP4AcI,9444
-model_library/providers/openai.py,sha256=zuJQk52NN9XnAe9a6P63RfvMYvKMGCb2ru3tuxvyAPQ,33358
-model_library/providers/perplexity.py,sha256=RX5K32OIpqltYgeJjJkDsMjenxQLO7ujV00Oq6_dXoY,2479
-model_library/providers/together.py,sha256=KaBTefk2y-4cyjuNdj61D-WFXs43rybVzeXWBETjBNg,8087
-model_library/providers/vals.py,sha256=NGEfEaADdqWJr-tC8Xdf7XWNScwF9sJ4KvnGdM3kiFo,9743
-model_library/providers/xai.py,sha256=U3d5HJ1tS_FUU0B9F9P5eVz-MSXLbIHqnIdkYK09VsI,10741
-model_library/providers/zai.py,sha256=4Ui-2bPQ7m0NwQLJAaMezbLUNZOqBfWzq1JCR8f65Nw,2523
-model_library/providers/google/__init__.py,sha256=ypuLVL_QJEQ7C3S47FhC9y4wyawYOdGikAViJmACI0U,115
-model_library/providers/google/batch.py,sha256=4TE90Uo1adi54dVtGcGyUAxw11YExJq-Y4KmkQ-cyHA,9978
-model_library/providers/google/google.py,sha256=nsfiA6x0Mo6e6Q307ogA99fAOUqiEqWoMjmbLvFUikQ,16347
-model_library-0.1.0.dist-info/licenses/LICENSE,sha256=x6mf4o7U_wHaaqcfxoU-0R6uYJLbqL_TNuoULP3asaA,1070
-model_library-0.1.0.dist-info/METADATA,sha256=mwsJnEXNJ6A9mXXoFkVeSyOdgYV9yosX6WoVMoiIAW4,6490
-model_library-0.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-model_library-0.1.0.dist-info/top_level.txt,sha256=HtQYxA_7RP8UT35I6VcUw20L6edI0Zf2t5Ys1uDGVjs,14
-model_library-0.1.0.dist-info/RECORD,,