airtrain 0.1.41__py3-none-any.whl → 0.1.44__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
airtrain/__init__.py CHANGED
@@ -1,6 +1,6 @@
  """Airtrain - A platform for building and deploying AI agents with structured skills"""

- __version__ = "0.1.41"
+ __version__ = "0.1.44"

  # Core imports
  from .core.skills import Skill, ProcessingError

airtrain/integrations/anthropic/__init__.py CHANGED
@@ -9,6 +9,12 @@ from .models_config import (
      get_default_model,
      calculate_cost,
  )
+ from .list_models import (
+     AnthropicListModelsSkill,
+     AnthropicListModelsInput,
+     AnthropicListModelsOutput,
+     AnthropicModel,
+ )

  __all__ = [
      "AnthropicCredentials",
@@ -20,4 +26,8 @@ __all__ = [
      "get_model_config",
      "get_default_model",
      "calculate_cost",
+     "AnthropicListModelsSkill",
+     "AnthropicListModelsInput",
+     "AnthropicListModelsOutput",
+     "AnthropicModel",
  ]

airtrain/integrations/anthropic/list_models.py ADDED
@@ -0,0 +1,110 @@
+ from typing import Optional, List, Dict, Any
+ from pydantic import Field
+
+ from airtrain.core.skills import Skill, ProcessingError
+ from airtrain.core.schemas import InputSchema, OutputSchema
+ from .credentials import AnthropicCredentials
+ from .models_config import ANTHROPIC_MODELS, AnthropicModelConfig
+
+
+ class AnthropicModel:
+     """Class to represent an Anthropic model."""
+
+     def __init__(self, model_id: str, config: AnthropicModelConfig):
+         """Initialize the Anthropic model."""
+         self.id = model_id
+         self.display_name = config.display_name
+         self.base_model = config.base_model
+         self.input_price = config.input_price
+         self.cached_write_price = config.cached_write_price
+         self.cached_read_price = config.cached_read_price
+         self.output_price = config.output_price
+
+     def dict(self, exclude_none=False):
+         """Convert the model to a dictionary."""
+         result = {
+             "id": self.id,
+             "display_name": self.display_name,
+             "base_model": self.base_model,
+             "input_price": float(self.input_price),
+             "output_price": float(self.output_price),
+         }
+
+         if self.cached_write_price is not None:
+             result["cached_write_price"] = float(self.cached_write_price)
+         elif not exclude_none:
+             result["cached_write_price"] = None
+
+         if self.cached_read_price is not None:
+             result["cached_read_price"] = float(self.cached_read_price)
+         elif not exclude_none:
+             result["cached_read_price"] = None
+
+         return result
+
+
+ class AnthropicListModelsInput(InputSchema):
+     """Schema for Anthropic list models input"""
+
+     api_models_only: bool = Field(
+         default=False,
+         description=(
+             "If True, fetch models from the API only. If False, use local config."
+         )
+     )
+
+
+ class AnthropicListModelsOutput(OutputSchema):
+     """Schema for Anthropic list models output"""
+
+     models: List[Dict[str, Any]] = Field(
+         default_factory=list,
+         description="List of Anthropic models"
+     )
+
+
+ class AnthropicListModelsSkill(
+     Skill[AnthropicListModelsInput, AnthropicListModelsOutput]
+ ):
+     """Skill for listing Anthropic models"""
+
+     input_schema = AnthropicListModelsInput
+     output_schema = AnthropicListModelsOutput
+
+     def __init__(self, credentials: Optional[AnthropicCredentials] = None):
+         """Initialize the skill with optional credentials"""
+         super().__init__()
+         self.credentials = credentials
+
+     def process(
+         self, input_data: AnthropicListModelsInput
+     ) -> AnthropicListModelsOutput:
+         """Process the input and return a list of models."""
+         try:
+             models = []
+
+             if input_data.api_models_only:
+                 # Fetch models from Anthropic API
+                 # Require credentials if using API models
+                 if not self.credentials:
+                     raise ProcessingError(
+                         "Anthropic credentials required for API models"
+                     )
+
+                 # Note: Anthropic doesn't have a public models list endpoint
+                 # We'll raise an error instead
+                 raise ProcessingError(
+                     "Anthropic API does not provide a models list endpoint. "
+                     "Use api_models_only=False to list models from local config."
+                 )
+             else:
+                 # Use local model config - no credentials needed
+                 for model_id, config in ANTHROPIC_MODELS.items():
+                     model = AnthropicModel(model_id, config)
+                     models.append(model.dict())
+
+             # Return the output
+             return AnthropicListModelsOutput(models=models)
+
+         except Exception as e:
+             raise ProcessingError(f"Failed to list Anthropic models: {str(e)}")
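
With the default api_models_only=False the new skill reads the bundled ANTHROPIC_MODELS config and returns plain dicts that include the cached read/write prices; no credentials are needed. A hypothetical usage sketch (class and module names come from the diff above, but the snippet itself is not shipped with the package):

    # Hypothetical usage sketch -- not part of airtrain itself.
    from airtrain.integrations.anthropic.list_models import (
        AnthropicListModelsInput,
        AnthropicListModelsSkill,
    )

    # Listing from the bundled local config needs no credentials.
    skill = AnthropicListModelsSkill()
    output = skill.process(AnthropicListModelsInput(api_models_only=False))
    for model in output.models:
        print(model["id"], model["input_price"], model.get("cached_read_price"))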

airtrain/integrations/anthropic/skills.py CHANGED
@@ -26,7 +26,7 @@ class AnthropicInput(InputSchema):
          default="claude-3-opus-20240229", description="Anthropic model to use"
      )
      max_tokens: Optional[int] = Field(
-         default=1024, description="Maximum tokens in response"
+         default=131072, description="Maximum tokens in response"
      )
      temperature: float = Field(
          default=0.7, description="Temperature for response generation", ge=0, le=1

airtrain/integrations/aws/skills.py CHANGED
@@ -21,7 +21,7 @@ class AWSBedrockInput(InputSchema):
          default="anthropic.claude-3-sonnet-20240229-v1:0",
          description="AWS Bedrock model to use",
      )
-     max_tokens: int = Field(default=1024, description="Maximum tokens in response")
+     max_tokens: int = Field(default=131072, description="Maximum tokens in response")
      temperature: float = Field(
          default=0.7, description="Temperature for response generation", ge=0, le=1
      )

airtrain/integrations/cerebras/skills.py CHANGED
@@ -22,7 +22,7 @@ class CerebrasInput(InputSchema):
      )
      model: str = Field(default="llama3.1-8b", description="Cerebras model to use")
      max_tokens: Optional[int] = Field(
-         default=1024, description="Maximum tokens in response"
+         default=131072, description="Maximum tokens in response"
      )
      temperature: float = Field(
          default=0.7, description="Temperature for response generation", ge=0, le=1

airtrain/integrations/fireworks/completion_skills.py CHANGED
@@ -17,7 +17,7 @@ class FireworksCompletionInput(InputSchema):
          default="accounts/fireworks/models/deepseek-r1",
          description="Fireworks AI model to use",
      )
-     max_tokens: int = Field(default=4096, description="Maximum tokens in response")
+     max_tokens: int = Field(default=131072, description="Maximum tokens in response")
      temperature: float = Field(
          default=0.7, description="Temperature for response generation", ge=0, le=1
      )

airtrain/integrations/fireworks/conversation_manager.py CHANGED
@@ -20,7 +20,7 @@ class ConversationState(BaseModel):
          description="Model being used for the conversation",
      )
      temperature: float = Field(default=0.7, description="Temperature setting")
-     max_tokens: Optional[int] = Field(default=None, description="Max tokens setting")
+     max_tokens: Optional[int] = Field(default=131072, description="Max tokens setting")


  class FireworksConversationManager:

airtrain/integrations/fireworks/requests_skills.py CHANGED
@@ -29,7 +29,7 @@ class FireworksRequestInput(InputSchema):
      temperature: float = Field(
          default=0.7, description="Temperature for response generation", ge=0, le=1
      )
-     max_tokens: int = Field(default=4096, description="Maximum tokens in response")
+     max_tokens: int = Field(default=131072, description="Maximum tokens in response")
      top_p: float = Field(
          default=1.0, description="Top p sampling parameter", ge=0, le=1
      )

airtrain/integrations/fireworks/skills.py CHANGED
@@ -27,7 +27,7 @@ class FireworksInput(InputSchema):
          default=0.7, description="Temperature for response generation", ge=0, le=1
      )
      max_tokens: Optional[int] = Field(
-         default=None, description="Maximum tokens in response"
+         default=131072, description="Maximum tokens in response"
      )
      context_length_exceeded_behavior: str = Field(
          default="truncate", description="Behavior when context length is exceeded"

airtrain/integrations/fireworks/structured_completion_skills.py CHANGED
@@ -21,7 +21,7 @@ class FireworksStructuredCompletionInput(InputSchema):
      temperature: float = Field(
          default=0.7, description="Temperature for response generation", ge=0, le=1
      )
-     max_tokens: int = Field(default=4096, description="Maximum tokens in response")
+     max_tokens: int = Field(default=131072, description="Maximum tokens in response")
      response_model: Type[ResponseT]
      stream: bool = Field(
          default=False,

airtrain/integrations/fireworks/structured_requests_skills.py CHANGED
@@ -31,7 +31,7 @@ class FireworksStructuredRequestInput(InputSchema):
      temperature: float = Field(
          default=0.7, description="Temperature for response generation", ge=0, le=1
      )
-     max_tokens: int = Field(default=4096, description="Maximum tokens in response")
+     max_tokens: int = Field(default=131072, description="Maximum tokens in response")
      response_model: Type[ResponseT]
      stream: bool = Field(
          default=False, description="Whether to stream the response token by token"

airtrain/integrations/fireworks/structured_skills.py CHANGED
@@ -17,7 +17,7 @@ class FireworksParserInput(InputSchema):
      system_prompt: str = "You are a helpful assistant that provides structured data."
      model: str = "accounts/fireworks/models/deepseek-r1"
      temperature: float = 0.7
-     max_tokens: Optional[int] = None
+     max_tokens: Optional[int] = 131072
      response_model: Type[ResponseT]
      conversation_history: List[Dict[str, str]] = Field(
          default_factory=list,

airtrain/integrations/groq/skills.py CHANGED
@@ -21,7 +21,7 @@ class GroqInput(InputSchema):
      model: str = Field(
          default="deepseek-r1-distill-llama-70b-specdec", description="Groq model to use"
      )
-     max_tokens: int = Field(default=1024, description="Maximum tokens in response")
+     max_tokens: int = Field(default=131072, description="Maximum tokens in response")
      temperature: float = Field(
          default=0.7, description="Temperature for response generation", ge=0, le=1
      )

airtrain/integrations/ollama/skills.py CHANGED
@@ -14,7 +14,7 @@ class OllamaInput(InputSchema):
          description="System prompt to guide the model's behavior",
      )
      model: str = Field(default="llama2", description="Ollama model to use")
-     max_tokens: int = Field(default=1024, description="Maximum tokens in response")
+     max_tokens: int = Field(default=131072, description="Maximum tokens in response")
      temperature: float = Field(
          default=0.7, description="Temperature for response generation", ge=0, le=1
      )

airtrain/integrations/openai/__init__.py CHANGED
@@ -1,3 +1,5 @@
+ """OpenAI API integration."""
+
  from .skills import (
      OpenAIChatSkill,
      OpenAIInput,
@@ -10,6 +12,12 @@ from .skills import (
      OpenAIEmbeddingsOutput,
  )
  from .credentials import OpenAICredentials
+ from .list_models import (
+     OpenAIListModelsSkill,
+     OpenAIListModelsInput,
+     OpenAIListModelsOutput,
+     OpenAIModel,
+ )

  __all__ = [
      "OpenAIChatSkill",
@@ -22,4 +30,8 @@ __all__ = [
      "OpenAIEmbeddingsSkill",
      "OpenAIEmbeddingsInput",
      "OpenAIEmbeddingsOutput",
+     "OpenAIListModelsSkill",
+     "OpenAIListModelsInput",
+     "OpenAIListModelsOutput",
+     "OpenAIModel",
  ]

airtrain/integrations/openai/chinese_assistant.py CHANGED
@@ -17,7 +17,7 @@ class ChineseAssistantInput(OpenAIInput):
          description="System prompt in Chinese",
      )
      model: str = Field(default="gpt-4o", description="OpenAI model to use")
-     max_tokens: int = Field(default=8096, description="Maximum tokens in response")
+     max_tokens: int = Field(default=131072, description="Maximum tokens in response")
      temperature: float = Field(
          default=0.7, description="Temperature for response generation", ge=0, le=1
      )

airtrain/integrations/openai/list_models.py ADDED
@@ -0,0 +1,112 @@
+ from typing import Optional, List, Dict, Any
+ from pydantic import Field
+
+ from airtrain.core.skills import Skill, ProcessingError
+ from airtrain.core.schemas import InputSchema, OutputSchema
+ from .credentials import OpenAICredentials
+ from .models_config import OPENAI_MODELS, OpenAIModelConfig
+
+
+ class OpenAIModel:
+     """Class to represent an OpenAI model."""
+
+     def __init__(self, model_id: str, config: OpenAIModelConfig):
+         """Initialize the OpenAI model."""
+         self.id = model_id
+         self.display_name = config.display_name
+         self.base_model = config.base_model
+         self.input_price = config.input_price
+         self.cached_input_price = config.cached_input_price
+         self.output_price = config.output_price
+
+     def dict(self, exclude_none=False):
+         """Convert the model to a dictionary."""
+         result = {
+             "id": self.id,
+             "display_name": self.display_name,
+             "base_model": self.base_model,
+             "input_price": float(self.input_price),
+             "output_price": float(self.output_price),
+         }
+         if self.cached_input_price is not None:
+             result["cached_input_price"] = float(self.cached_input_price)
+         elif not exclude_none:
+             result["cached_input_price"] = None
+         return result
+
+
+ class OpenAIListModelsInput(InputSchema):
+     """Schema for OpenAI list models input"""
+
+     api_models_only: bool = Field(
+         default=False,
+         description=(
+             "If True, fetch models from the API only. If False, use local config."
+         )
+     )
+
+
+ class OpenAIListModelsOutput(OutputSchema):
+     """Schema for OpenAI list models output"""
+
+     models: List[Dict[str, Any]] = Field(
+         default_factory=list,
+         description="List of OpenAI models"
+     )
+
+
+ class OpenAIListModelsSkill(Skill[OpenAIListModelsInput, OpenAIListModelsOutput]):
+     """Skill for listing OpenAI models"""
+
+     input_schema = OpenAIListModelsInput
+     output_schema = OpenAIListModelsOutput
+
+     def __init__(self, credentials: Optional[OpenAICredentials] = None):
+         """Initialize the skill with optional credentials"""
+         super().__init__()
+         self.credentials = credentials
+
+     def process(
+         self, input_data: OpenAIListModelsInput
+     ) -> OpenAIListModelsOutput:
+         """Process the input and return a list of models."""
+         try:
+             models = []
+
+             if input_data.api_models_only:
+                 # Fetch models from OpenAI API - requires credentials
+                 if not self.credentials:
+                     raise ProcessingError(
+                         "OpenAI credentials required for API models"
+                     )
+
+                 from openai import OpenAI
+                 client = OpenAI(
+                     api_key=self.credentials.openai_api_key.get_secret_value(),
+                     organization=self.credentials.openai_organization_id,
+                 )
+
+                 # Make API call to get models
+                 response = client.models.list()
+
+                 # Convert response to our format
+                 for model in response.data:
+                     models.append({
+                         "id": model.id,
+                         "display_name": model.id,  # API doesn't provide display_name
+                         "base_model": model.id,  # API doesn't provide base_model
+                         "created": model.created,
+                         "owned_by": model.owned_by,
+                         # Pricing info not available from API
+                     })
+             else:
+                 # Use local model config - no credentials needed
+                 for model_id, config in OPENAI_MODELS.items():
+                     model = OpenAIModel(model_id, config)
+                     models.append(model.dict())
+
+             # Return the output
+             return OpenAIListModelsOutput(models=models)
+
+         except Exception as e:
+             raise ProcessingError(f"Failed to list OpenAI models: {str(e)}")
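
Unlike the Anthropic variant, api_models_only=True has a working path here: it builds an openai client from the stored credentials and calls client.models.list(), returning entries without pricing data. A hypothetical sketch of the default, no-credentials path (not part of the package):

    # Hypothetical usage sketch; names come from the diff above.
    from airtrain.integrations.openai.list_models import (
        OpenAIListModelsInput,
        OpenAIListModelsSkill,
    )

    # Local pricing config: no credentials required.
    output = OpenAIListModelsSkill().process(OpenAIListModelsInput())
    for model in output.models:
        print(model["id"], model["input_price"], model["output_price"])

    # Passing OpenAIListModelsInput(api_models_only=True) instead requires an
    # OpenAICredentials instance and fetches the live list via client.models.list().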

airtrain/integrations/openai/skills.py CHANGED
@@ -29,7 +29,7 @@ class OpenAIInput(InputSchema):
          default=0.7, description="Temperature for response generation", ge=0, le=1
      )
      max_tokens: Optional[int] = Field(
-         default=None, description="Maximum tokens in response"
+         default=131072, description="Maximum tokens in response"
      )
      stream: bool = Field(
          default=False,

airtrain/integrations/together/list_models.py CHANGED
@@ -62,13 +62,12 @@ class TogetherListModelsSkill(Skill[TogetherListModelsInput, TogetherListModelsO

              # Convert the models to TogetherModel objects
              models = []
-             for model_data in result.get("data", []):
+             for model_data in result:
                  models.append(TogetherModel(**model_data))

              # Return the output
              return TogetherListModelsOutput(
                  data=models,
-                 object=result.get("object")
              )

          except requests.RequestException as e:
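
Dropping result.get("data", []) and the object= field implies the Together models response is now handled as a bare JSON array rather than an {"object": ..., "data": [...]} envelope. A small sketch of that assumption (the response shape is inferred from the diff, not from Together's documentation):

    # Sketch of the parsing change implied by the diff above.
    import json

    enveloped = json.loads('{"object": "list", "data": [{"id": "deepseek-ai/DeepSeek-R1"}]}')
    bare_list = json.loads('[{"id": "deepseek-ai/DeepSeek-R1"}]')

    ids_old = [m["id"] for m in enveloped.get("data", [])]  # 0.1.41 parsing
    ids_new = [m["id"] for m in bare_list]                  # 0.1.44 parsing
    assert ids_old == ids_new == ["deepseek-ai/DeepSeek-R1"]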

airtrain/integrations/together/models_config.py CHANGED
@@ -13,7 +13,7 @@ TOGETHER_MODELS: Dict[str, ModelConfig] = {
      "deepseek-ai/DeepSeek-R1": ModelConfig(
          organization="DeepSeek",
          display_name="DeepSeek-R1",
-         context_length=32768,
+         context_length=131072,
          quantization="FP8",
      ),
      "deepseek-ai/DeepSeek-R1-Distill-Llama-70B": ModelConfig(
@@ -37,7 +37,7 @@ TOGETHER_MODELS: Dict[str, ModelConfig] = {
      "deepseek-ai/DeepSeek-V3": ModelConfig(
          organization="DeepSeek",
          display_name="DeepSeek-V3",
-         context_length=16384,
+         context_length=131072,
          quantization="FP8",
      ),
      # Meta Models

airtrain-0.1.41.dist-info/METADATA → airtrain-0.1.44.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
- Metadata-Version: 2.2
+ Metadata-Version: 2.4
  Name: airtrain
- Version: 0.1.41
+ Version: 0.1.44
  Summary: A platform for building and deploying AI agents with structured skills
  Home-page: https://github.com/rosaboyle/airtrain.dev
  Author: Dheeraj Pai

airtrain-0.1.41.dist-info/RECORD → airtrain-0.1.44.dist-info/RECORD RENAMED
@@ -1,4 +1,4 @@
- airtrain/__init__.py,sha256=KrDT-Fn1iruBhdnav_vK-wkcO6-ZfwcOCDukwElOxEU,2099
+ airtrain/__init__.py,sha256=_i8k5dricoc_30LRe5yFCrObuhrmuImelQnbT_Kkmmw,2099
  airtrain/__main__.py,sha256=EU8ffFmCdC1G-UcHHt0Oo3lB1PGqfC6kwzH39CnYSwU,72
  airtrain/builder/__init__.py,sha256=D33sr0k_WAe6FAJkk8rUaivEzFaeVqLXkQgyFWEhfPU,110
  airtrain/builder/agent_builder.py,sha256=3XnGUAcK_6lWoUDtL0TanliQZuh7u0unhNbnrz1z2-I,5018
@@ -14,41 +14,43 @@ airtrain/core/credentials.py,sha256=PgQotrQc46J5djidKnkK1znUv3fyNkUFDO-m2Kn_Gzo,
  airtrain/core/schemas.py,sha256=MMXrDviC4gRea_QaPpbjgO--B_UKxnD7YrxqZOLJZZU,7003
  airtrain/core/skills.py,sha256=LljalzeSHK5eQPTAOEAYc5D8Qn1kVSfiz9WgziTD5UM,4688
  airtrain/integrations/__init__.py,sha256=rk9QFl0Dd7Qp4rULhi_u4smwsJwk69Kg_-fv0GQ43iw,1782
- airtrain/integrations/anthropic/__init__.py,sha256=F4kB5fuj7nYgTVcgzeHGc91LT96FZfsCJVBVCnTRh-k,541
+ airtrain/integrations/anthropic/__init__.py,sha256=K741w3v7fWsCknTo38ARqDL0D3HPlwDIvDuuBao9Tto,800
  airtrain/integrations/anthropic/credentials.py,sha256=hlTSw9HX66kYNaeQUtn0JjdZQBMNkzzFOJOoLOOzvcY,1246
+ airtrain/integrations/anthropic/list_models.py,sha256=o7FABp0Cq3gs76zOF-CM9ohmYslWT6vK9qtQabV9XzI,3973
  airtrain/integrations/anthropic/models_config.py,sha256=TZt31hLcT-9YK-NxqiarMyOwvUWMgXAzAcPfSwzDSiQ,3347
- airtrain/integrations/anthropic/skills.py,sha256=WV-9254H2VqUAq_7Zr1xG5IhejeC_gQSqyH0hwW1_tY,5870
+ airtrain/integrations/anthropic/skills.py,sha256=bs9OYHeY1ETsvXt60vq6xNj2PQK_sBULnaVGvSQ9vXQ,5872
  airtrain/integrations/aws/__init__.py,sha256=3x7v2NxpAfI-U-YgwQeH5PtsmUrNLPMfLyUGFLiBjbs,155
  airtrain/integrations/aws/credentials.py,sha256=nN-daKAl7qOb_VdRpsThG8gN5GeSUkx-ji5E_gF_vYw,1444
- airtrain/integrations/aws/skills.py,sha256=TQiMXeXRRcJ14fe8Xi7Uk20iS6_INbcznuLGtMorcKY,3870
+ airtrain/integrations/aws/skills.py,sha256=2l16Y5zYeNd9trrPca6Rbhvl6a-GJBuCQMu7RqX9txo,3872
  airtrain/integrations/cerebras/__init__.py,sha256=zAD-qV38OzHhMCz1z-NvjjqcYEhURbm8RWTOKHNqbew,174
  airtrain/integrations/cerebras/credentials.py,sha256=KDEH4r8FGT68L9p34MLZWK65wq_a703pqIF3ODaSbts,694
- airtrain/integrations/cerebras/skills.py,sha256=hmqcnF-nkFk5YJVf8f-TiKBfb8kYCfnC30W67VZ7CKU,4922
+ airtrain/integrations/cerebras/skills.py,sha256=BJEb_7TglCYAukD3kcx37R8ibnJWdxVrBrwf3ZTYP-4,4924
  airtrain/integrations/fireworks/__init__.py,sha256=GstUg0rYC-7Pg0DVbDXwL5eO1hp3WCSfroWazbGpfi0,545
- airtrain/integrations/fireworks/completion_skills.py,sha256=G657xWd7izLOxXq7RmqdupBF4DHqXQgXuhQ-MW7mtqc,5613
- airtrain/integrations/fireworks/conversation_manager.py,sha256=m6VEHijqpYEYawkKhuHtb8RQxw4kxGWFWdbSK6zGuro,3704
+ airtrain/integrations/fireworks/completion_skills.py,sha256=zxx7aNlum9scQMri5Ek0qN8VfAomhyUp3u8JJo_AFWM,5615
+ airtrain/integrations/fireworks/conversation_manager.py,sha256=ifscKHYKWM_NDElin-oTzpRhyoh6pzBnklmMuH5geOY,3706
  airtrain/integrations/fireworks/credentials.py,sha256=eeV9y_4pTe8LZX02I7kfA_YNY2D7MSbFl7JEZVn22zQ,864
  airtrain/integrations/fireworks/list_models.py,sha256=o4fP0K3qstBopO7va2LysLp4_KUf5Iz_YROrYkaNtVs,4686
  airtrain/integrations/fireworks/models.py,sha256=yo4xtweSi4qQftg04r4naRddx3KjU9Jluzqf5C7V9f4,4626
- airtrain/integrations/fireworks/requests_skills.py,sha256=c84Vy_4EcBrwJfp3jqizzlcja_LsEtvWh59qiaIjukg,8233
- airtrain/integrations/fireworks/skills.py,sha256=o9OY69cC10P8BtBBYRYLCyR_GwxmNlF6YhnrXiNS53o,7154
- airtrain/integrations/fireworks/structured_completion_skills.py,sha256=-AJTaOFC8vkFiEjHW24VL8ymcNSVbhZp6xb4enkL95U,6620
- airtrain/integrations/fireworks/structured_requests_skills.py,sha256=FgUdWb6_GI2ZBWhK2wp-WqKZUkwCkKNBBjYcRkHtjog,11850
- airtrain/integrations/fireworks/structured_skills.py,sha256=BZaLqSOTC11QdZ4kDORS4JnwF_YXBAa-IiwQ5dJiHXw,3895
+ airtrain/integrations/fireworks/requests_skills.py,sha256=h6HRV5dGvV7t3zyjD-awW47RyeDbu8onNevhcgSSy94,8235
+ airtrain/integrations/fireworks/skills.py,sha256=Ns1tXXTVtTeeVYadzm4dnmmOboo430WTMu2o56oWTDc,7156
+ airtrain/integrations/fireworks/structured_completion_skills.py,sha256=airYakYWXzYRS9nfNfrH90N3eeN8YW7GaY3ygLSiBO8,6622
+ airtrain/integrations/fireworks/structured_requests_skills.py,sha256=uQR-nygtWmdGTwvU-aUdMNOMit_PiBVPYRa80ZloHLs,11852
+ airtrain/integrations/fireworks/structured_skills.py,sha256=1wZ_7QDUhKWCSv_1lSEF6VnAqEeEA3jWHq7n0fWicgw,3897
  airtrain/integrations/google/__init__.py,sha256=ElwgcXfbg_gGMm6zbkMXCQPFKZUb-yTJk986o19A7Cs,214
  airtrain/integrations/google/credentials.py,sha256=KSvWNqW8Mjr4MkysRvUqlrOSGdShNIe5u2OPO6vRrWY,2047
  airtrain/integrations/google/skills.py,sha256=ytsoksCY4qbfRO9Brnxhc2694fAj0ytnHX20SXS_FOM,4547
  airtrain/integrations/groq/__init__.py,sha256=B_X2fXbsJfFD6GquKeVCsEJjwd9Ygbq1uEHlV4Jy7YE,154
  airtrain/integrations/groq/credentials.py,sha256=bdTHykcIeaQ7td8KZlQBPfEFAkvJuxk2f_cbTLPD_I4,844
- airtrain/integrations/groq/skills.py,sha256=qFyxC_2xZYnByAPo5p2aHbrqhdHYCoIdvDRAauSfnjk,4821
+ airtrain/integrations/groq/skills.py,sha256=XNwGE2fjb9MDth3NI5uqSiEQYLsLBuCFF7YTu_xoTug,4823
  airtrain/integrations/ollama/__init__.py,sha256=zMHBsGzViVrvxAeJmfq6r-ZfSE6Dy5QcKLhe4d5fEcM,164
  airtrain/integrations/ollama/credentials.py,sha256=D7O4kUAb_VHs5s1ncUN9Ezhu5PvLfgj3RifAkB9sEZk,940
- airtrain/integrations/ollama/skills.py,sha256=M_Un8D5VJ5XtPEq9IClzqV3jCPBoFTSm2ve6EO8W2JU,1556
- airtrain/integrations/openai/__init__.py,sha256=w5V7lxvrKtrrjyqGoppEKg9ORKKQ2cxaLOpgCZdm_H8,541
- airtrain/integrations/openai/chinese_assistant.py,sha256=MMhv4NBOoEQ0O22ZZtP255rd5ajHC9l6FPWIjpqxBOA,1581
+ airtrain/integrations/ollama/skills.py,sha256=QHEvIrFmuwFuC3ZAmy6xL2hNNGZWii1z9Y884JuOvnI,1558
+ airtrain/integrations/openai/__init__.py,sha256=LYLxgDOAMUhw0ChBqj7XAJSTMNt9JiS2hHJDnJWS6IE,807
+ airtrain/integrations/openai/chinese_assistant.py,sha256=F8bMeUUDly7BYG6wO648cAEIj5S_frVE5tm1Xno63Gc,1583
  airtrain/integrations/openai/credentials.py,sha256=NfRyp1QgEtgm8cxt2-BOLq-6d0X-Pcm80NnfHM8p0FY,1470
+ airtrain/integrations/openai/list_models.py,sha256=vg8pZwLZ3F2Fx42X18WykpJOzZD9JG-2KJi49XWgSKo,4121
  airtrain/integrations/openai/models_config.py,sha256=W9mu_z9tCC4ZUKHSJ6Hk4X09TRZLqEhT7TtRY5JEk5g,8007
- airtrain/integrations/openai/skills.py,sha256=1dvRJYrnU2hOmGRlkHBtyR6P8D7aIwHZfUKmjlReWrQ,12821
+ airtrain/integrations/openai/skills.py,sha256=kEDe5q0Zlv_X-JGOYtb552ktb3aQQYVUYczVwMH0jxA,12823
  airtrain/integrations/sambanova/__init__.py,sha256=dp_263iOckM_J9pOEvyqpf3FrejD6-_x33r0edMCTe0,179
  airtrain/integrations/sambanova/credentials.py,sha256=JyN8sbMCoXuXAjim46aI3LTicBijoemS7Ao0rn4yBJU,824
  airtrain/integrations/sambanova/skills.py,sha256=SZ_GAimMiOCILiNkzyhNflyRR6bdC5r0Tnog19K8geU,4997
@@ -58,16 +60,16 @@ airtrain/integrations/together/credentials.py,sha256=cYNhyIwgsxm8LfiFfT-omBvgV3m
  airtrain/integrations/together/embedding_models_config.py,sha256=F0ISAXCG_Pcnf-ojkvZwIXacXD8LaU8hQmGHCFzmlds,2927
  airtrain/integrations/together/image_models_config.py,sha256=JlCozrphI9zE4uYpGfj4DCWSN6GZGyr84Tb1HmjNQ28,2455
  airtrain/integrations/together/image_skill.py,sha256=wQ8wSzfL-QHpM_esYGLNXf8ciOPPsz-QJw6zSrxZT68,5214
- airtrain/integrations/together/list_models.py,sha256=BFq_w3Rz9WP2gKIaQNNIyUJUaYkz-FCSEbNMClccrsY,2580
+ airtrain/integrations/together/list_models.py,sha256=_QLGqweBiK6saz3n4xTQRmXSSs-qGFnV9kma-MSaE9o,2520
  airtrain/integrations/together/models.py,sha256=q5KsouOK7IvyzGZ7nhSjTpZw-CcLfPghJr6o_UU9uMo,3652
- airtrain/integrations/together/models_config.py,sha256=XMKp0Oq1nWWnMMdNAZxkFXmJaURwWrwLE18kFXsMsRw,8829
+ airtrain/integrations/together/models_config.py,sha256=lCPouXjKa49lebGbMaaL2SU-CMYxOc-dJviUOir2e_w,8831
  airtrain/integrations/together/rerank_models_config.py,sha256=coCg0IOG2tU4L2uc2uPtPdoBwGjSc_zQwxENwdDuwHE,1188
  airtrain/integrations/together/rerank_skill.py,sha256=gjH24hLWCweWKPyyfKZMG3K_g9gWzm80WgiJNjkA9eg,1894
  airtrain/integrations/together/schemas.py,sha256=pBMrbX67oxPCr-sg4K8_Xqu1DWbaC4uLCloVSascROg,1210
  airtrain/integrations/together/skills.py,sha256=8DwkexMJu1Gm6QmNDfNasYStQ31QsXBbFP99zR-YCf0,7598
  airtrain/integrations/together/vision_models_config.py,sha256=m28HwYDk2Kup_J-a1FtynIa2ZVcbl37kltfoHnK8zxs,1544
- airtrain-0.1.41.dist-info/METADATA,sha256=mOYF47bkfI4rQJFBkcmnHZ47u1Pqh6e_S8-4Ps3KmGg,5375
- airtrain-0.1.41.dist-info/WHEEL,sha256=beeZ86-EfXScwlR_HKu4SllMC9wUEj_8Z_4FJ3egI2w,91
- airtrain-0.1.41.dist-info/entry_points.txt,sha256=rrJ36IUsyq6n1dSfTWXqVAgpQLPRWDfCqwd6_3B-G0U,52
- airtrain-0.1.41.dist-info/top_level.txt,sha256=cFWW1vY6VMCb3AGVdz6jBDpZ65xxBRSqlsPyySxTkxY,9
- airtrain-0.1.41.dist-info/RECORD,,
+ airtrain-0.1.44.dist-info/METADATA,sha256=EV6dwjtrrrN_YvL7KpCP9FNGpjZYNGWVwXN7tzP-czw,5375
+ airtrain-0.1.44.dist-info/WHEEL,sha256=tTnHoFhvKQHCh4jz3yCn0WPTYIy7wXx3CJtJ7SJGV7c,91
+ airtrain-0.1.44.dist-info/entry_points.txt,sha256=rrJ36IUsyq6n1dSfTWXqVAgpQLPRWDfCqwd6_3B-G0U,52
+ airtrain-0.1.44.dist-info/top_level.txt,sha256=cFWW1vY6VMCb3AGVdz6jBDpZ65xxBRSqlsPyySxTkxY,9
+ airtrain-0.1.44.dist-info/RECORD,,

airtrain-0.1.41.dist-info/WHEEL → airtrain-0.1.44.dist-info/WHEEL RENAMED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (76.1.0)
+ Generator: setuptools (77.0.1)
  Root-Is-Purelib: true
  Tag: py3-none-any