airtrain 0.1.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- airtrain/__init__.py +9 -0
- airtrain/core/__init__.py +7 -0
- airtrain/core/credentials.py +125 -0
- airtrain/core/schemas.py +237 -0
- airtrain/core/skills.py +167 -0
- airtrain/integrations/__init__.py +47 -0
- airtrain/integrations/anthropic/__init__.py +11 -0
- airtrain/integrations/anthropic/credentials.py +32 -0
- airtrain/integrations/anthropic/skills.py +135 -0
- airtrain/integrations/aws/__init__.py +6 -0
- airtrain/integrations/aws/credentials.py +36 -0
- airtrain/integrations/aws/skills.py +98 -0
- airtrain/integrations/cerebras/__init__.py +6 -0
- airtrain/integrations/cerebras/credentials.py +22 -0
- airtrain/integrations/cerebras/skills.py +41 -0
- airtrain/integrations/google/__init__.py +6 -0
- airtrain/integrations/google/credentials.py +27 -0
- airtrain/integrations/google/skills.py +41 -0
- airtrain/integrations/groq/__init__.py +6 -0
- airtrain/integrations/groq/credentials.py +24 -0
- airtrain/integrations/groq/skills.py +41 -0
- airtrain/integrations/ollama/__init__.py +6 -0
- airtrain/integrations/ollama/credentials.py +26 -0
- airtrain/integrations/ollama/skills.py +41 -0
- airtrain/integrations/openai/__init__.py +19 -0
- airtrain/integrations/openai/chinese_assistant.py +42 -0
- airtrain/integrations/openai/credentials.py +39 -0
- airtrain/integrations/openai/skills.py +208 -0
- airtrain/integrations/sambanova/__init__.py +6 -0
- airtrain/integrations/sambanova/credentials.py +20 -0
- airtrain/integrations/sambanova/skills.py +41 -0
- airtrain/integrations/together/__init__.py +6 -0
- airtrain/integrations/together/credentials.py +22 -0
- airtrain/integrations/together/skills.py +43 -0
- airtrain-0.1.10.dist-info/METADATA +168 -0
- airtrain-0.1.10.dist-info/RECORD +38 -0
- airtrain-0.1.10.dist-info/WHEEL +5 -0
- airtrain-0.1.10.dist-info/top_level.txt +1 -0
airtrain/integrations/openai/credentials.py
@@ -0,0 +1,39 @@
from datetime import datetime, timedelta
from typing import Optional
from pydantic import Field, SecretStr, validator
from openai import OpenAI

from airtrain.core.credentials import BaseCredentials, CredentialValidationError


class OpenAICredentials(BaseCredentials):
    """OpenAI API credentials with enhanced validation"""

    openai_api_key: SecretStr = Field(..., description="OpenAI API key")
    openai_organization_id: Optional[str] = Field(
        None, description="OpenAI organization ID", pattern="^org-[A-Za-z0-9]{24}$"
    )

    _required_credentials = {"openai_api_key"}

    @validator("openai_api_key")
    def validate_api_key_format(cls, v: SecretStr) -> SecretStr:
        key = v.get_secret_value()
        if not key.startswith("sk-"):
            raise ValueError("OpenAI API key must start with 'sk-'")
        if len(key) < 40:
            raise ValueError("OpenAI API key appears to be too short")
        return v

    async def validate_credentials(self) -> bool:
        """Validate credentials by making a test API call"""
        try:
            client = OpenAI(
                api_key=self.openai_api_key.get_secret_value(),
                organization=self.openai_organization_id,
            )
            # Make minimal API call to validate
            await client.models.list(limit=1)
            return True
        except Exception as e:
            raise CredentialValidationError(f"Invalid OpenAI credentials: {str(e)}")
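The credentials class above validates the key format eagerly and defers the live check to an async `validate_credentials()`. A minimal usage sketch (editorial, not part of the package) that constructs it directly and drives the validator with `asyncio`; the key below is a placeholder shaped only to pass the format validator, so the live check is expected to fail and surface as `CredentialValidationError`:

```python
import asyncio

from airtrain.core.credentials import CredentialValidationError
from airtrain.integrations.openai.credentials import OpenAICredentials

# Placeholder key: the format validator only requires the "sk-" prefix
# and a length of at least 40 characters.
creds = OpenAICredentials(
    openai_api_key="sk-" + "x" * 48,
    openai_organization_id=None,  # optional; must match ^org-[A-Za-z0-9]{24}$ when set
)

try:
    # validate_credentials() is a coroutine, so it needs an event loop.
    asyncio.run(creds.validate_credentials())
    print("OpenAI credentials accepted")
except CredentialValidationError as exc:
    print(f"Validation failed: {exc}")
```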
airtrain/integrations/openai/skills.py
@@ -0,0 +1,208 @@
from typing import List, Optional, Dict, Any, TypeVar, Type
from pydantic import Field, BaseModel
from openai import OpenAI
import base64
from pathlib import Path
from loguru import logger

from airtrain.core.skills import Skill, ProcessingError
from airtrain.core.schemas import InputSchema, OutputSchema
from .credentials import OpenAICredentials


class OpenAIInput(InputSchema):
    """Schema for OpenAI chat input"""

    user_input: str = Field(..., description="User's input text")
    system_prompt: str = Field(
        default="You are a helpful assistant.",
        description="System prompt to guide the model's behavior",
    )
    model: str = Field(default="gpt-4o", description="OpenAI model to use")
    max_tokens: int = Field(default=8192, description="Maximum tokens in response")
    temperature: float = Field(
        default=0.2, description="Temperature for response generation", ge=0, le=1
    )
    images: Optional[List[Path]] = Field(
        default=None,
        description="Optional list of image paths to include in the message",
    )
    functions: Optional[List[Dict[str, Any]]] = Field(
        default=None,
        description="Optional function definitions for function calling",
    )
    function_call: Optional[str] = Field(
        default=None,
        description="Controls function calling behavior",
    )


class OpenAIOutput(OutputSchema):
    """Schema for OpenAI chat output"""

    response: str = Field(..., description="Model's response text")
    used_model: str = Field(..., description="Model used for generation")
    usage: Dict[str, Any] = Field(
        default_factory=dict, description="Usage statistics from the API"
    )
    function_call: Optional[Dict[str, Any]] = Field(
        default=None, description="Function call information if applicable"
    )


class OpenAIChatSkill(Skill[OpenAIInput, OpenAIOutput]):
    """Skill for interacting with OpenAI's models"""

    input_schema = OpenAIInput
    output_schema = OpenAIOutput

    def __init__(self, credentials: Optional[OpenAICredentials] = None):
        """Initialize the skill with optional credentials"""
        super().__init__()
        self.credentials = credentials or OpenAICredentials.from_env()
        self.client = OpenAI(
            api_key=self.credentials.openai_api_key.get_secret_value(),
            organization=self.credentials.openai_organization_id,
        )

    def _encode_image(self, image_path: Path) -> Dict[str, Any]:
        """Convert image to base64 for API consumption"""
        try:
            if not image_path.exists():
                raise FileNotFoundError(f"Image file not found: {image_path}")

            with open(image_path, "rb") as img_file:
                encoded = base64.b64encode(img_file.read()).decode()
                return {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{encoded}"},
                }
        except Exception as e:
            logger.error(f"Failed to encode image {image_path}: {str(e)}")
            raise ProcessingError(f"Image encoding failed: {str(e)}")

    def process(self, input_data: OpenAIInput) -> OpenAIOutput:
        """Process the input using OpenAI's API"""
        try:
            logger.info(f"Processing request with model {input_data.model}")

            # Prepare message content
            content = []

            # Add text content
            content.append({"type": "text", "text": input_data.user_input})

            # Add images if provided
            if input_data.images:
                logger.debug(f"Processing {len(input_data.images)} images")
                for image_path in input_data.images:
                    content.append(self._encode_image(image_path))

            # Prepare messages
            messages = [
                {"role": "system", "content": input_data.system_prompt},
                {"role": "user", "content": content},
            ]

            # Create completion parameters
            params = {
                "model": input_data.model,
                "messages": messages,
                "temperature": input_data.temperature,
                "max_tokens": input_data.max_tokens,
            }

            # Add function calling if provided
            if input_data.functions:
                params["functions"] = input_data.functions
                params["function_call"] = input_data.function_call

            # Create chat completion
            response = self.client.chat.completions.create(**params)

            # Extract function call if present
            function_call = None
            if response.choices[0].message.function_call:
                function_call = response.choices[0].message.function_call.model_dump()

            logger.success("Successfully processed OpenAI request")

            return OpenAIOutput(
                response=response.choices[0].message.content or "",
                used_model=response.model,
                usage={
                    "prompt_tokens": response.usage.prompt_tokens,
                    "completion_tokens": response.usage.completion_tokens,
                    "total_tokens": response.usage.total_tokens,
                },
                function_call=function_call,
            )

        except Exception as e:
            logger.exception(f"OpenAI processing failed: {str(e)}")
            raise ProcessingError(f"OpenAI processing failed: {str(e)}")


ResponseT = TypeVar("ResponseT", bound=BaseModel)


class OpenAIParserInput(InputSchema):
    """Schema for OpenAI structured output input"""

    user_input: str
    system_prompt: str = "You are a helpful assistant that provides structured data."
    model: str = "gpt-4o"
    temperature: float = 0.7
    max_tokens: Optional[int] = None
    response_model: Type[ResponseT]

    class Config:
        arbitrary_types_allowed = True


class OpenAIParserOutput(OutputSchema):
    """Schema for OpenAI structured output"""

    parsed_response: BaseModel
    used_model: str
    tokens_used: int


class OpenAIParserSkill(Skill[OpenAIParserInput, OpenAIParserOutput]):
    """Skill for getting structured responses from OpenAI"""

    input_schema = OpenAIParserInput
    output_schema = OpenAIParserOutput

    def __init__(self, credentials: Optional[OpenAICredentials] = None):
        """Initialize the skill with optional credentials"""
        super().__init__()
        self.credentials = credentials or OpenAICredentials.from_env()
        self.client = OpenAI(
            api_key=self.credentials.openai_api_key.get_secret_value(),
            organization=self.credentials.openai_organization_id,
        )

    def process(self, input_data: OpenAIParserInput) -> OpenAIParserOutput:
        try:
            # Use parse method instead of create
            completion = self.client.beta.chat.completions.parse(
                model=input_data.model,
                messages=[
                    {"role": "system", "content": input_data.system_prompt},
                    {"role": "user", "content": input_data.user_input},
                ],
                response_format=input_data.response_model,
            )

            if completion.choices[0].message.parsed is None:
                raise ProcessingError("Failed to parse response")

            return OpenAIParserOutput(
                parsed_response=completion.choices[0].message.parsed,
                used_model=completion.model,
                tokens_used=completion.usage.total_tokens,
            )

        except Exception as e:
            raise ProcessingError(f"OpenAI parsing failed: {str(e)}")
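`OpenAIInput` carries optional `functions` and `function_call` fields that `process()` forwards into the chat completion parameters, but the bundled README only shows plain chat and structured parsing. A hedged sketch of the function-calling path, assuming a live `OPENAI_API_KEY` is available via `OpenAICredentials.from_env()`; `get_weather` is an illustrative function definition, not part of airtrain:

```python
from airtrain.integrations.openai.skills import OpenAIChatSkill, OpenAIInput

skill = OpenAIChatSkill()  # falls back to OpenAICredentials.from_env()

# Illustrative legacy-style function definition (name / description / parameters).
get_weather = {
    "name": "get_weather",
    "description": "Look up the current weather for a city",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
}

result = skill.process(
    OpenAIInput(
        user_input="What's the weather in Paris right now?",
        functions=[get_weather],
        function_call="auto",  # forwarded verbatim into the completion params
    )
)

if result.function_call:
    print("Model requested a call:", result.function_call)
else:
    print(result.response)
```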
airtrain/integrations/sambanova/credentials.py
@@ -0,0 +1,20 @@
from pydantic import Field, SecretStr, HttpUrl
from airtrain.core.credentials import BaseCredentials, CredentialValidationError


class SambanovaCredentials(BaseCredentials):
    """SambaNova credentials"""

    api_key: SecretStr = Field(..., description="SambaNova API key")
    endpoint_url: HttpUrl = Field(..., description="SambaNova API endpoint")

    _required_credentials = {"api_key", "endpoint_url"}

    async def validate_credentials(self) -> bool:
        """Validate SambaNova credentials"""
        try:
            # Implement SambaNova-specific validation
            # This would depend on their API client implementation
            return True
        except Exception as e:
            raise CredentialValidationError(f"Invalid SambaNova credentials: {str(e)}")
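A short construction sketch for the SambaNova credentials above; the endpoint is a placeholder, and note that `validate_credentials()` is currently a stub that returns `True` without contacting the API:

```python
from airtrain.integrations.sambanova.credentials import SambanovaCredentials

creds = SambanovaCredentials(
    api_key="placeholder-key",              # stored as a masked SecretStr
    endpoint_url="https://example.com/v1",  # placeholder; parsed and checked as HttpUrl
)

print(creds.api_key)       # prints '**********'; the secret is not exposed
print(creds.endpoint_url)  # the validated URL
```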
airtrain/integrations/sambanova/skills.py
@@ -0,0 +1,41 @@
from typing import Optional, Dict, Any
from pydantic import Field
from airtrain.core.skills import Skill, ProcessingError
from airtrain.core.schemas import InputSchema, OutputSchema
from .credentials import SambanovaCredentials


class SambanovaInput(InputSchema):
    """Schema for Sambanova input"""

    user_input: str = Field(..., description="User's input text")
    system_prompt: str = Field(
        default="You are a helpful assistant.",
        description="System prompt to guide the model's behavior",
    )
    model: str = Field(default="sambanova-llm", description="Sambanova model to use")
    max_tokens: int = Field(default=1024, description="Maximum tokens in response")
    temperature: float = Field(
        default=0.7, description="Temperature for response generation", ge=0, le=1
    )


class SambanovaOutput(OutputSchema):
    """Schema for Sambanova output"""

    response: str = Field(..., description="Model's response text")
    used_model: str = Field(..., description="Model used for generation")
    usage: Dict[str, Any] = Field(default_factory=dict, description="Usage statistics")


class SambanovaChatSkill(Skill[SambanovaInput, SambanovaOutput]):
    """Skill for Sambanova - Not Implemented"""

    input_schema = SambanovaInput
    output_schema = SambanovaOutput

    def __init__(self, credentials: Optional[SambanovaCredentials] = None):
        raise NotImplementedError("SambanovaChatSkill is not implemented yet")

    def process(self, input_data: SambanovaInput) -> SambanovaOutput:
        raise NotImplementedError("SambanovaChatSkill is not implemented yet")
airtrain/integrations/together/credentials.py
@@ -0,0 +1,22 @@
from pydantic import Field, SecretStr
from airtrain.core.credentials import BaseCredentials, CredentialValidationError
import together


class TogetherAICredentials(BaseCredentials):
    """Together AI credentials"""

    api_key: SecretStr = Field(..., description="Together AI API key")

    _required_credentials = {"api_key"}

    async def validate_credentials(self) -> bool:
        """Validate Together AI credentials"""
        try:
            together.api_key = self.api_key.get_secret_value()
            await together.Models.list()
            return True
        except Exception as e:
            raise CredentialValidationError(
                f"Invalid Together AI credentials: {str(e)}"
            )
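A hedged sketch of exercising the Together AI validator above; `validate_credentials()` sets the module-level `together.api_key` and attempts a live model-list call, so with a placeholder key the call is expected to fail and be re-raised as `CredentialValidationError`:

```python
import asyncio

from airtrain.core.credentials import CredentialValidationError
from airtrain.integrations.together.credentials import TogetherAICredentials

creds = TogetherAICredentials(api_key="placeholder-key")

try:
    asyncio.run(creds.validate_credentials())
    print("Together AI credentials accepted")
except CredentialValidationError as exc:
    print(f"Validation failed: {exc}")
```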
airtrain/integrations/together/skills.py
@@ -0,0 +1,43 @@
from typing import Optional, Dict, Any
from pydantic import Field
from airtrain.core.skills import Skill, ProcessingError
from airtrain.core.schemas import InputSchema, OutputSchema
from .credentials import TogetherAICredentials


class TogetherAIInput(InputSchema):
    """Schema for Together AI input"""

    user_input: str = Field(..., description="User's input text")
    system_prompt: str = Field(
        default="You are a helpful assistant.",
        description="System prompt to guide the model's behavior",
    )
    model: str = Field(
        default="togethercomputer/llama-2-70b", description="Together AI model to use"
    )
    max_tokens: int = Field(default=1024, description="Maximum tokens in response")
    temperature: float = Field(
        default=0.7, description="Temperature for response generation", ge=0, le=1
    )


class TogetherAIOutput(OutputSchema):
    """Schema for Together AI output"""

    response: str = Field(..., description="Model's response text")
    used_model: str = Field(..., description="Model used for generation")
    usage: Dict[str, Any] = Field(default_factory=dict, description="Usage statistics")


class TogetherAIChatSkill(Skill[TogetherAIInput, TogetherAIOutput]):
    """Skill for Together AI - Not Implemented"""

    input_schema = TogetherAIInput
    output_schema = TogetherAIOutput

    def __init__(self, credentials: Optional[TogetherAICredentials] = None):
        raise NotImplementedError("TogetherAIChatSkill is not implemented yet")

    def process(self, input_data: TogetherAIInput) -> TogetherAIOutput:
        raise NotImplementedError("TogetherAIChatSkill is not implemented yet")
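Both the SambaNova and Together chat skills in this release are stubs: `__init__` and `process()` raise `NotImplementedError`. A caller that wants to degrade gracefully can probe for that at construction time, for example:

```python
from airtrain.integrations.openai.skills import OpenAIChatSkill
from airtrain.integrations.together.skills import TogetherAIChatSkill

try:
    skill = TogetherAIChatSkill()
except NotImplementedError:
    # Stub in 0.1.10; fall back to an implemented provider.
    skill = OpenAIChatSkill()
```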
airtrain-0.1.10.dist-info/METADATA
@@ -0,0 +1,168 @@
Metadata-Version: 2.2
Name: airtrain
Version: 0.1.10
Summary: A platform for building and deploying AI agents with structured skills
Home-page: https://github.com/rosaboyle/airtrain.dev
Author: Dheeraj Pai
Author-email: helloworldcmu@gmail.com
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Requires-Python: >=3.8
Description-Content-Type: text/markdown
Requires-Dist: pydantic>=2.10.6
Requires-Dist: openai>=1.60.1
Requires-Dist: python-dotenv>=1.0.1
Requires-Dist: PyYAML>=6.0.2
Requires-Dist: firebase-admin>=6.6.0
Requires-Dist: loguru>=0.7.3
Requires-Dist: requests>=2.32.3
Requires-Dist: boto3>=1.36.6
Requires-Dist: together>=1.3.13
Requires-Dist: anthropic>=0.45.0
Dynamic: author
Dynamic: author-email
Dynamic: classifier
Dynamic: description
Dynamic: description-content-type
Dynamic: home-page
Dynamic: requires-dist
Dynamic: requires-python
Dynamic: summary

# Airtrain

A powerful platform for building and deploying AI agents with structured skills and capabilities.

## Features

- **Structured Skills**: Build modular AI skills with defined input/output schemas
- **Multiple LLM Integrations**: Built-in support for OpenAI and Anthropic models
- **Structured Outputs**: Parse LLM responses into structured Pydantic models
- **Credential Management**: Secure handling of API keys and credentials
- **Type Safety**: Full type hints and Pydantic model support
- **Image Support**: Handle image inputs for multimodal models
- **Error Handling**: Robust error handling and logging

## Installation

```bash
pip install airtrain
```

## Quick Start

### 1. Basic OpenAI Chat

```python
from airtrain.integrations.openai.skills import OpenAIChatSkill, OpenAIInput

# Initialize the skill
skill = OpenAIChatSkill()

# Create input
input_data = OpenAIInput(
    user_input="Explain quantum computing in simple terms.",
    system_prompt="You are a helpful teacher.",
    max_tokens=500,
    temperature=0.7
)

# Get response
result = skill.process(input_data)
print(result.response)
print(f"Tokens Used: {result.usage['total_tokens']}")
```

### 2. Anthropic Claude Integration

```python
from airtrain.integrations.anthropic.skills import AnthropicChatSkill, AnthropicInput

# Initialize the skill
skill = AnthropicChatSkill()

# Create input
input_data = AnthropicInput(
    user_input="Explain the theory of relativity.",
    system_prompt="You are a physics expert.",
    model="claude-3-opus-20240229",
    temperature=0.3
)

# Get response
result = skill.process(input_data)
print(result.response)
print(f"Usage: {result.usage}")
```

### 3. Structured Output with OpenAI

```python
from pydantic import BaseModel
from typing import List
from airtrain.integrations.openai.skills import OpenAIParserSkill, OpenAIParserInput

# Define your response model
class PersonInfo(BaseModel):
    name: str
    age: int
    occupation: str
    skills: List[str]

# Initialize the parser skill
parser_skill = OpenAIParserSkill()

# Create input with response model
input_data = OpenAIParserInput(
    user_input="Tell me about John Doe, a 30-year-old software engineer who specializes in Python and AI",
    system_prompt="Extract structured information about the person.",
    response_model=PersonInfo
)

# Get structured response
result = parser_skill.process(input_data)
person_info = result.parsed_response
print(f"Name: {person_info.name}")
print(f"Skills: {', '.join(person_info.skills)}")
```

## Error Handling

All skills include built-in error handling:

```python
from airtrain.core.skills import ProcessingError

try:
    result = skill.process(input_data)
except ProcessingError as e:
    print(f"Processing failed: {e}")
```

## Advanced Features

- Image Analysis Support
- Function Calling
- Custom Validators
- Async Processing
- Token Usage Tracking

For more examples and detailed documentation, visit our [documentation](https://airtrain.readthedocs.io/).

## Documentation

For detailed documentation, visit [our documentation site](https://docs.airtrain.dev/).

## Contributing

Contributions are welcome! Please feel free to submit a Pull Request.

## License

This project is licensed under the MIT License - see the LICENSE file for details.
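The README's Advanced Features list mentions image analysis without showing it. A minimal sketch using the `images` field of `OpenAIInput` from skills.py above, assuming a local `photo.jpg` (a hypothetical file) and a vision-capable model; the skill reads each file and base64-encodes it before sending:

```python
from pathlib import Path

from airtrain.integrations.openai.skills import OpenAIChatSkill, OpenAIInput

skill = OpenAIChatSkill()

result = skill.process(
    OpenAIInput(
        user_input="Describe what is in this photo.",
        images=[Path("photo.jpg")],  # hypothetical local file
        model="gpt-4o",
    )
)
print(result.response)
```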
airtrain-0.1.10.dist-info/RECORD
@@ -0,0 +1,38 @@
airtrain/__init__.py,sha256=QlWDodHKi49nnPROPULRasxMFzKN_R6iz8tPk5xT4rM,313
airtrain/core/__init__.py,sha256=9h7iKwTzZocCPc9bU6j8bA02BokteWIOcO1uaqGMcrk,254
airtrain/core/credentials.py,sha256=PgQotrQc46J5djidKnkK1znUv3fyNkUFDO-m2Kn_Gzo,4006
airtrain/core/schemas.py,sha256=MMXrDviC4gRea_QaPpbjgO--B_UKxnD7YrxqZOLJZZU,7003
airtrain/core/skills.py,sha256=LljalzeSHK5eQPTAOEAYc5D8Qn1kVSfiz9WgziTD5UM,4688
airtrain/integrations/__init__.py,sha256=Y-nxbWlPGy0Txt6qkRzLKQGapfAfL_q9W6U3ISTSg_I,1486
airtrain/integrations/anthropic/__init__.py,sha256=qwlWLDh1rEVizYFbW8430z-f1SxHio7_Gaw5cCTUtoo,274
airtrain/integrations/anthropic/credentials.py,sha256=hlTSw9HX66kYNaeQUtn0JjdZQBMNkzzFOJOoLOOzvcY,1246
airtrain/integrations/anthropic/skills.py,sha256=sT7dBYPVCsICYjgBjUlyyP84A8h9OkbgkslvKHk3Tjs,5273
airtrain/integrations/aws/__init__.py,sha256=3x7v2NxpAfI-U-YgwQeH5PtsmUrNLPMfLyUGFLiBjbs,155
airtrain/integrations/aws/credentials.py,sha256=nN-daKAl7qOb_VdRpsThG8gN5GeSUkx-ji5E_gF_vYw,1444
airtrain/integrations/aws/skills.py,sha256=TQiMXeXRRcJ14fe8Xi7Uk20iS6_INbcznuLGtMorcKY,3870
airtrain/integrations/cerebras/__init__.py,sha256=zAD-qV38OzHhMCz1z-NvjjqcYEhURbm8RWTOKHNqbew,174
airtrain/integrations/cerebras/credentials.py,sha256=IFkn8LxMAaOpvEWXDpb94VQGtqcDxQ7rZHKH-tX4Nuw,884
airtrain/integrations/cerebras/skills.py,sha256=O9vwFzvv_tUOwFOVE8CszAQEac711eVYVUj_8dVMTpc,1596
airtrain/integrations/google/__init__.py,sha256=INZFNOcNebz3m-Ggk07ZjmX0kNHIbTe_St9gBlZBki8,176
airtrain/integrations/google/credentials.py,sha256=yyl-MWl06wr4SWvcvJGSpJ3hGTz21ByrRSr_3np5cbU,1030
airtrain/integrations/google/skills.py,sha256=uwmgetl5Ien7fLOA5HIZdqoL6AZnexFDyzfsrGuJ1RU,1606
airtrain/integrations/groq/__init__.py,sha256=B_X2fXbsJfFD6GquKeVCsEJjwd9Ygbq1uEHlV4Jy7YE,154
airtrain/integrations/groq/credentials.py,sha256=A8-VIyoZTkHFQb-O-lmu-UrgaLZ3hfWfzzigkYteESk,829
airtrain/integrations/groq/skills.py,sha256=Qy6SBAb19SzOFuqgcLyzdyRBp4D7jKqsEeJ6UTDaqMM,1528
airtrain/integrations/ollama/__init__.py,sha256=zMHBsGzViVrvxAeJmfq6r-ZfSE6Dy5QcKLhe4d5fEcM,164
airtrain/integrations/ollama/credentials.py,sha256=D7O4kUAb_VHs5s1ncUN9Ezhu5PvLfgj3RifAkB9sEZk,940
airtrain/integrations/ollama/skills.py,sha256=M_Un8D5VJ5XtPEq9IClzqV3jCPBoFTSm2ve6EO8W2JU,1556
airtrain/integrations/openai/__init__.py,sha256=K-NY2_T1T6SEOgkpbUA55cWvK2nr2NOJgLCqmmtaCno,371
airtrain/integrations/openai/chinese_assistant.py,sha256=MMhv4NBOoEQ0O22ZZtP255rd5ajHC9l6FPWIjpqxBOA,1581
airtrain/integrations/openai/credentials.py,sha256=NfRyp1QgEtgm8cxt2-BOLq-6d0X-Pcm80NnfHM8p0FY,1470
airtrain/integrations/openai/skills.py,sha256=Olg9-6f_p2XgkVwwcB9tvjAMApmM2EK81i8LP4qVVvs,7676
airtrain/integrations/sambanova/__init__.py,sha256=dp_263iOckM_J9pOEvyqpf3FrejD6-_x33r0edMCTe0,179
airtrain/integrations/sambanova/credentials.py,sha256=U36RAEIPNuwo-vTrt3U9kkkj2GfdqSclA1ttOYHxS-w,784
airtrain/integrations/sambanova/skills.py,sha256=Po1ur_QFwzVIugbkk2mt73WdXDz_Gr9ASlUc9Y12Kok,1614
airtrain/integrations/together/__init__.py,sha256=we4KXn_pUs6Dxo3QcB-t40BSRraQFdKg2nXw7yi2FjM,185
airtrain/integrations/together/credentials.py,sha256=y5M6ZQrfYJLJbClxEasq4HaVyZM0l5lFshwVP6jq2E4,720
airtrain/integrations/together/skills.py,sha256=YMOULyk2TX32rCjhxK29e4ehn8iIzMXpg3xmdYtuyQQ,1664
airtrain-0.1.10.dist-info/METADATA,sha256=frRmuMVDKKHudrTIAIaIkIecEMYwkcb3BAbjGYxYZqA,4508
airtrain-0.1.10.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
airtrain-0.1.10.dist-info/top_level.txt,sha256=cFWW1vY6VMCb3AGVdz6jBDpZ65xxBRSqlsPyySxTkxY,9
airtrain-0.1.10.dist-info/RECORD,,
airtrain-0.1.10.dist-info/top_level.txt
@@ -0,0 +1 @@
airtrain