airtrain 0.1.5-py3-none-any.whl → 0.1.7-py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
airtrain/__init__.py CHANGED
@@ -1,6 +1,6 @@
  """Airtrain - A platform for building and deploying AI agents with structured skills"""

- __version__ = "0.1.5"
+ __version__ = "0.1.7"

  from .core.skills import Skill
  from .core.schemas import InputSchema, OutputSchema
airtrain/integrations/anthropic/__init__.py ADDED
@@ -0,0 +1,11 @@
+ """Anthropic integration for Airtrain"""
+
+ from .credentials import AnthropicCredentials
+ from .skills import AnthropicChatSkill, AnthropicInput, AnthropicOutput
+
+ __all__ = [
+     "AnthropicCredentials",
+     "AnthropicChatSkill",
+     "AnthropicInput",
+     "AnthropicOutput",
+ ]
airtrain/integrations/anthropic/credentials.py ADDED
@@ -0,0 +1,32 @@
+ from pydantic import Field, SecretStr, validator
+ from airtrain.core.credentials import BaseCredentials, CredentialValidationError
+ from anthropic import Anthropic
+
+
+ class AnthropicCredentials(BaseCredentials):
+     """Anthropic API credentials"""
+
+     anthropic_api_key: SecretStr = Field(..., description="Anthropic API key")
+     version: str = Field(default="2023-06-01", description="API Version")
+
+     _required_credentials = {"anthropic_api_key"}
+
+     @validator("anthropic_api_key")
+     def validate_api_key_format(cls, v: SecretStr) -> SecretStr:
+         key = v.get_secret_value()
+         if not key.startswith("sk-ant-"):
+             raise ValueError("Anthropic API key must start with 'sk-ant-'")
+         return v
+
+     async def validate_credentials(self) -> bool:
+         """Validate Anthropic credentials"""
+         try:
+             client = Anthropic(api_key=self.anthropic_api_key.get_secret_value())
+             client.messages.create(
+                 model="claude-3-opus-20240229",
+                 max_tokens=1,
+                 messages=[{"role": "user", "content": "Hi"}],
+             )
+             return True
+         except Exception as e:
+             raise CredentialValidationError(f"Invalid Anthropic credentials: {str(e)}")
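For illustration, a minimal sketch of how the credential check added above might be exercised; the `sk-ant-your-key-here` value is a placeholder, and the call makes a real one-token request to the API:

```python
import asyncio

from pydantic import SecretStr

from airtrain.integrations.anthropic.credentials import AnthropicCredentials

# Construct credentials explicitly; the validator rejects keys that do not
# start with "sk-ant-". Replace the placeholder with a real key.
creds = AnthropicCredentials(anthropic_api_key=SecretStr("sk-ant-your-key-here"))

# validate_credentials() is async and sends a one-token test message to Claude;
# it raises CredentialValidationError if the key is rejected.
asyncio.run(creds.validate_credentials())
```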
airtrain/integrations/anthropic/skills.py ADDED
@@ -0,0 +1,135 @@
+ from typing import List, Optional, Dict, Any
+ from pydantic import Field
+ from anthropic import Anthropic
+ import base64
+ from pathlib import Path
+ from loguru import logger
+
+ from airtrain.core.skills import Skill, ProcessingError
+ from airtrain.core.schemas import InputSchema, OutputSchema
+ from .credentials import AnthropicCredentials
+
+
+ class AnthropicInput(InputSchema):
+     """Schema for Anthropic chat input"""
+
+     user_input: str = Field(..., description="User's input text")
+     system_prompt: str = Field(
+         default="You are a helpful assistant.",
+         description="System prompt to guide the model's behavior",
+     )
+     model: str = Field(
+         default="claude-3-opus-20240229", description="Anthropic model to use"
+     )
+     max_tokens: int = Field(default=1024, description="Maximum tokens in response")
+     temperature: float = Field(
+         default=0.7, description="Temperature for response generation", ge=0, le=1
+     )
+     images: Optional[List[Path]] = Field(
+         default=None,
+         description="Optional list of image paths to include in the message",
+     )
+
+
+ class AnthropicOutput(OutputSchema):
+     """Schema for Anthropic chat output"""
+
+     response: str = Field(..., description="Model's response text")
+     used_model: str = Field(..., description="Model used for generation")
+     usage: Dict[str, Any] = Field(
+         default_factory=dict, description="Usage statistics from the API"
+     )
+
+
+ class AnthropicChatSkill(Skill[AnthropicInput, AnthropicOutput]):
+     """Skill for interacting with Anthropic's Claude models"""
+
+     input_schema = AnthropicInput
+     output_schema = AnthropicOutput
+
+     def __init__(self, credentials: Optional[AnthropicCredentials] = None):
+         """Initialize the skill with optional credentials"""
+         super().__init__()
+         self.credentials = credentials or AnthropicCredentials.from_env()
+         self.client = Anthropic(
+             api_key=self.credentials.anthropic_api_key.get_secret_value()
+         )
+
+     def _encode_image(self, image_path: Path) -> Dict[str, Any]:
+         """Convert image to base64 for API consumption"""
+         try:
+             if not image_path.exists():
+                 raise FileNotFoundError(f"Image file not found: {image_path}")
+
+             with open(image_path, "rb") as img_file:
+                 encoded = base64.b64encode(img_file.read()).decode()
+                 return {
+                     "type": "image",
+                     "source": {
+                         "type": "base64",
+                         "media_type": f"image/{image_path.suffix[1:]}",
+                         "data": encoded,
+                     },
+                 }
+         except Exception as e:
+             logger.error(f"Failed to encode image {image_path}: {str(e)}")
+             raise ProcessingError(f"Image encoding failed: {str(e)}")
+
+     def process(self, input_data: AnthropicInput) -> AnthropicOutput:
+         """Process the input using Anthropic's API"""
+         try:
+             logger.info(f"Processing request with model {input_data.model}")
+
+             # Prepare message content
+             content = []
+
+             # Add text content
+             content.append({"type": "text", "text": input_data.user_input})
+
+             # Add images if provided
+             if input_data.images:
+                 logger.debug(f"Processing {len(input_data.images)} images")
+                 for image_path in input_data.images:
+                     content.append(self._encode_image(image_path))
+
+             # Create message
+             response = self.client.messages.create(
+                 model=input_data.model,
+                 max_tokens=input_data.max_tokens,
+                 temperature=input_data.temperature,
+                 system=input_data.system_prompt,
+                 messages=[{"role": "user", "content": content}],
+             )
+
+             # Validate response content
+             if not response.content:
+                 logger.error("Empty response received from Anthropic API")
+                 raise ProcessingError("Empty response received from Anthropic API")
+
+             if not isinstance(response.content, list) or not response.content:
+                 logger.error("Invalid response format from Anthropic API")
+                 raise ProcessingError("Invalid response format from Anthropic API")
+
+             first_content = response.content[0]
+             if not hasattr(first_content, "text"):
+                 logger.error("Response content does not contain text")
+                 raise ProcessingError("Response content does not contain text")
+
+             logger.success("Successfully processed Anthropic request")
+
+             # Create output
+             return AnthropicOutput(
+                 response=first_content.text,
+                 used_model=response.model,
+                 usage={
+                     "input_tokens": response.usage.input_tokens,
+                     "output_tokens": response.usage.output_tokens,
+                 },
+             )
+
+         except ProcessingError:
+             # Re-raise ProcessingError without modification
+             raise
+         except Exception as e:
+             logger.exception(f"Anthropic processing failed: {str(e)}")
+             raise ProcessingError(f"Anthropic processing failed: {str(e)}")
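The package README later in this diff shows a text-only call to this skill; a hedged sketch of the image path is below, assuming credentials are available in the environment and using a hypothetical local file `photo.png`:

```python
from pathlib import Path

from airtrain.integrations.anthropic.skills import AnthropicChatSkill, AnthropicInput

# With no argument, the skill loads AnthropicCredentials.from_env().
skill = AnthropicChatSkill()

input_data = AnthropicInput(
    user_input="Describe what is in this image.",
    images=[Path("photo.png")],  # hypothetical file; _encode_image base64-encodes it
    max_tokens=512,
)

result = skill.process(input_data)
print(result.response)
print(result.usage)  # {"input_tokens": ..., "output_tokens": ...}
```

The media type sent to the API is derived from the file suffix, so the path's extension should match the actual image format.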
airtrain/integrations/openai/__init__.py ADDED
@@ -0,0 +1,19 @@
+ from .skills import (
+     OpenAIChatSkill,
+     OpenAIInput,
+     OpenAIParserSkill,
+     OpenAIOutput,
+     OpenAIParserInput,
+     OpenAIParserOutput,
+ )
+ from .credentials import OpenAICredentials
+
+ __all__ = [
+     "OpenAIChatSkill",
+     "OpenAIInput",
+     "OpenAIParserSkill",
+     "OpenAIParserInput",
+     "OpenAIParserOutput",
+     "OpenAICredentials",
+     "OpenAIOutput",
+ ]
airtrain/integrations/openai/chinese_assistant.py ADDED
@@ -0,0 +1,42 @@
+ from typing import Optional, TypeVar
+ from pydantic import Field
+ from .skills import OpenAIChatSkill, OpenAIInput, OpenAIOutput
+ from .credentials import OpenAICredentials
+
+ T = TypeVar("T", bound=OpenAIInput)
+
+
+ class ChineseAssistantInput(OpenAIInput):
+     """Schema for Chinese Assistant input"""
+
+     user_input: str = Field(
+         ..., description="User's input text (can be in any language)"
+     )
+     system_prompt: str = Field(
+         default="你是一个有帮助的助手。请用中文回答所有问题,即使问题是用其他语言问的。回答要准确、礼貌、专业。",
+         description="System prompt in Chinese",
+     )
+     model: str = Field(default="gpt-4o", description="OpenAI model to use")
+     max_tokens: int = Field(default=8096, description="Maximum tokens in response")
+     temperature: float = Field(
+         default=0.7, description="Temperature for response generation", ge=0, le=1
+     )
+
+
+ class ChineseAssistantSkill(OpenAIChatSkill):
+     """Skill for Chinese language assistance"""
+
+     input_schema = ChineseAssistantInput
+     output_schema = OpenAIOutput
+
+     def __init__(self, credentials: Optional[OpenAICredentials] = None):
+         super().__init__(credentials)
+
+     def process(self, input_data: T) -> OpenAIOutput:
+         # Add language check to ensure response is in Chinese
+         if "你是" not in input_data.system_prompt:
+             input_data.system_prompt = (
+                 "你是一个中文助手。" + input_data.system_prompt + "请用中文回答。"
+             )
+
+         return super().process(input_data)
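A short usage sketch for this subclass, assuming OpenAI credentials are available in the environment; note the module is not re-exported from the `openai` package `__init__`, so it is imported by its module path:

```python
from airtrain.integrations.openai.chinese_assistant import (
    ChineseAssistantInput,
    ChineseAssistantSkill,
)

# Inherits OpenAIChatSkill; credentials come from OpenAICredentials.from_env().
skill = ChineseAssistantSkill()

# The default system prompt already instructs the model to answer in Chinese,
# so only the user's question is required.
result = skill.process(ChineseAssistantInput(user_input="What is machine learning?"))
print(result.response)
```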
airtrain/integrations/openai/credentials.py ADDED
@@ -0,0 +1,39 @@
+ from datetime import datetime, timedelta
+ from typing import Optional
+ from pydantic import Field, SecretStr, validator
+ from openai import OpenAI
+
+ from airtrain.core.credentials import BaseCredentials, CredentialValidationError
+
+
+ class OpenAICredentials(BaseCredentials):
+     """OpenAI API credentials with enhanced validation"""
+
+     openai_api_key: SecretStr = Field(..., description="OpenAI API key")
+     openai_organization_id: Optional[str] = Field(
+         None, description="OpenAI organization ID", pattern="^org-[A-Za-z0-9]{24}$"
+     )
+
+     _required_credentials = {"openai_api_key"}
+
+     @validator("openai_api_key")
+     def validate_api_key_format(cls, v: SecretStr) -> SecretStr:
+         key = v.get_secret_value()
+         if not key.startswith("sk-"):
+             raise ValueError("OpenAI API key must start with 'sk-'")
+         if len(key) < 40:
+             raise ValueError("OpenAI API key appears to be too short")
+         return v
+
+     async def validate_credentials(self) -> bool:
+         """Validate credentials by making a test API call"""
+         try:
+             client = OpenAI(
+                 api_key=self.openai_api_key.get_secret_value(),
+                 organization=self.openai_organization_id,
+             )
+             # Make minimal API call to validate
+             await client.models.list(limit=1)
+             return True
+         except Exception as e:
+             raise CredentialValidationError(f"Invalid OpenAI credentials: {str(e)}")
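A sketch of constructing this credentials object directly, with placeholder values chosen only to satisfy the format validator; the organization ID is optional and must match the `^org-` pattern when given:

```python
from pydantic import SecretStr

from airtrain.integrations.openai.credentials import OpenAICredentials

# Placeholder key for illustration: real keys start with "sk-" and the
# validator also requires at least 40 characters.
creds = OpenAICredentials(
    openai_api_key=SecretStr("sk-" + "x" * 45),
    openai_organization_id=None,
)

# The OpenAI skills later in this diff accept this object through their
# `credentials` argument instead of reading the environment.
```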
airtrain/integrations/openai/skills.py ADDED
@@ -0,0 +1,208 @@
+ from typing import List, Optional, Dict, Any, TypeVar, Type
+ from pydantic import Field, BaseModel
+ from openai import OpenAI
+ import base64
+ from pathlib import Path
+ from loguru import logger
+
+ from airtrain.core.skills import Skill, ProcessingError
+ from airtrain.core.schemas import InputSchema, OutputSchema
+ from .credentials import OpenAICredentials
+
+
+ class OpenAIInput(InputSchema):
+     """Schema for OpenAI chat input"""
+
+     user_input: str = Field(..., description="User's input text")
+     system_prompt: str = Field(
+         default="You are a helpful assistant.",
+         description="System prompt to guide the model's behavior",
+     )
+     model: str = Field(default="gpt-4o", description="OpenAI model to use")
+     max_tokens: int = Field(default=8192, description="Maximum tokens in response")
+     temperature: float = Field(
+         default=0.2, description="Temperature for response generation", ge=0, le=1
+     )
+     images: Optional[List[Path]] = Field(
+         default=None,
+         description="Optional list of image paths to include in the message",
+     )
+     functions: Optional[List[Dict[str, Any]]] = Field(
+         default=None,
+         description="Optional function definitions for function calling",
+     )
+     function_call: Optional[str] = Field(
+         default=None,
+         description="Controls function calling behavior",
+     )
+
+
+ class OpenAIOutput(OutputSchema):
+     """Schema for OpenAI chat output"""
+
+     response: str = Field(..., description="Model's response text")
+     used_model: str = Field(..., description="Model used for generation")
+     usage: Dict[str, Any] = Field(
+         default_factory=dict, description="Usage statistics from the API"
+     )
+     function_call: Optional[Dict[str, Any]] = Field(
+         default=None, description="Function call information if applicable"
+     )
+
+
+ class OpenAIChatSkill(Skill[OpenAIInput, OpenAIOutput]):
+     """Skill for interacting with OpenAI's models"""
+
+     input_schema = OpenAIInput
+     output_schema = OpenAIOutput
+
+     def __init__(self, credentials: Optional[OpenAICredentials] = None):
+         """Initialize the skill with optional credentials"""
+         super().__init__()
+         self.credentials = credentials or OpenAICredentials.from_env()
+         self.client = OpenAI(
+             api_key=self.credentials.openai_api_key.get_secret_value(),
+             organization=self.credentials.openai_organization_id,
+         )
+
+     def _encode_image(self, image_path: Path) -> Dict[str, Any]:
+         """Convert image to base64 for API consumption"""
+         try:
+             if not image_path.exists():
+                 raise FileNotFoundError(f"Image file not found: {image_path}")
+
+             with open(image_path, "rb") as img_file:
+                 encoded = base64.b64encode(img_file.read()).decode()
+                 return {
+                     "type": "image_url",
+                     "image_url": {"url": f"data:image/jpeg;base64,{encoded}"},
+                 }
+         except Exception as e:
+             logger.error(f"Failed to encode image {image_path}: {str(e)}")
+             raise ProcessingError(f"Image encoding failed: {str(e)}")
+
+     def process(self, input_data: OpenAIInput) -> OpenAIOutput:
+         """Process the input using OpenAI's API"""
+         try:
+             logger.info(f"Processing request with model {input_data.model}")
+
+             # Prepare message content
+             content = []
+
+             # Add text content
+             content.append({"type": "text", "text": input_data.user_input})
+
+             # Add images if provided
+             if input_data.images:
+                 logger.debug(f"Processing {len(input_data.images)} images")
+                 for image_path in input_data.images:
+                     content.append(self._encode_image(image_path))
+
+             # Prepare messages
+             messages = [
+                 {"role": "system", "content": input_data.system_prompt},
+                 {"role": "user", "content": content},
+             ]
+
+             # Create completion parameters
+             params = {
+                 "model": input_data.model,
+                 "messages": messages,
+                 "temperature": input_data.temperature,
+                 "max_tokens": input_data.max_tokens,
+             }
+
+             # Add function calling if provided
+             if input_data.functions:
+                 params["functions"] = input_data.functions
+                 params["function_call"] = input_data.function_call
+
+             # Create chat completion
+             response = self.client.chat.completions.create(**params)
+
+             # Extract function call if present
+             function_call = None
+             if response.choices[0].message.function_call:
+                 function_call = response.choices[0].message.function_call.model_dump()
+
+             logger.success("Successfully processed OpenAI request")
+
+             return OpenAIOutput(
+                 response=response.choices[0].message.content or "",
+                 used_model=response.model,
+                 usage={
+                     "prompt_tokens": response.usage.prompt_tokens,
+                     "completion_tokens": response.usage.completion_tokens,
+                     "total_tokens": response.usage.total_tokens,
+                 },
+                 function_call=function_call,
+             )
+
+         except Exception as e:
+             logger.exception(f"OpenAI processing failed: {str(e)}")
+             raise ProcessingError(f"OpenAI processing failed: {str(e)}")
+
+
+ ResponseT = TypeVar("ResponseT", bound=BaseModel)
+
+
+ class OpenAIParserInput(InputSchema):
+     """Schema for OpenAI structured output input"""
+
+     user_input: str
+     system_prompt: str = "You are a helpful assistant that provides structured data."
+     model: str = "gpt-4o"
+     temperature: float = 0.7
+     max_tokens: Optional[int] = None
+     response_model: Type[ResponseT]
+
+     class Config:
+         arbitrary_types_allowed = True
+
+
+ class OpenAIParserOutput(OutputSchema):
+     """Schema for OpenAI structured output"""
+
+     parsed_response: BaseModel
+     used_model: str
+     tokens_used: int
+
+
+ class OpenAIParserSkill(Skill[OpenAIParserInput, OpenAIParserOutput]):
+     """Skill for getting structured responses from OpenAI"""
+
+     input_schema = OpenAIParserInput
+     output_schema = OpenAIParserOutput
+
+     def __init__(self, credentials: Optional[OpenAICredentials] = None):
+         """Initialize the skill with optional credentials"""
+         super().__init__()
+         self.credentials = credentials or OpenAICredentials.from_env()
+         self.client = OpenAI(
+             api_key=self.credentials.openai_api_key.get_secret_value(),
+             organization=self.credentials.openai_organization_id,
+         )
+
+     def process(self, input_data: OpenAIParserInput) -> OpenAIParserOutput:
+         try:
+             # Use parse method instead of create
+             completion = self.client.beta.chat.completions.parse(
+                 model=input_data.model,
+                 messages=[
+                     {"role": "system", "content": input_data.system_prompt},
+                     {"role": "user", "content": input_data.user_input},
+                 ],
+                 response_format=input_data.response_model,
+             )
+
+             if completion.choices[0].message.parsed is None:
+                 raise ProcessingError("Failed to parse response")
+
+             return OpenAIParserOutput(
+                 parsed_response=completion.choices[0].message.parsed,
+                 used_model=completion.model,
+                 tokens_used=completion.usage.total_tokens,
+             )
+
+         except Exception as e:
+             raise ProcessingError(f"OpenAI parsing failed: {str(e)}")
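The README later in this diff covers basic chat and structured parsing, but not the `functions` / `function_call` fields added here. A hedged sketch of that path is below, using a made-up `get_weather` definition in the legacy function-calling format and assuming credentials come from the environment:

```python
from airtrain.integrations.openai.skills import OpenAIChatSkill, OpenAIInput

skill = OpenAIChatSkill()

# Hypothetical function definition; the skill forwards it unchanged to
# chat.completions.create() via the "functions" parameter.
weather_fn = {
    "name": "get_weather",
    "description": "Get the current weather for a city",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
}

result = skill.process(
    OpenAIInput(
        user_input="What's the weather in Paris?",
        functions=[weather_fn],
        function_call="auto",
    )
)

# If the model chose to call the function, the parsed call is surfaced here;
# otherwise this is None and result.response holds the text reply.
print(result.function_call)
```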
airtrain-0.1.7.dist-info/METADATA ADDED
@@ -0,0 +1,164 @@
+ Metadata-Version: 2.2
+ Name: airtrain
+ Version: 0.1.7
+ Summary: A platform for building and deploying AI agents with structured skills
+ Home-page: https://github.com/rosaboyle/airtrain.dev
+ Author: Dheeraj Pai
+ Author-email: helloworldcmu@gmail.com
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ Requires-Dist: pydantic>=2.0.0
+ Requires-Dist: openai>=1.0.0
+ Requires-Dist: python-dotenv>=0.19.0
+ Requires-Dist: PyYAML>=5.4.1
+ Requires-Dist: firebase-admin>=5.0.0
+ Requires-Dist: loguru>=0.5.3
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary
+
+ # Airtrain
+
+ A powerful platform for building and deploying AI agents with structured skills and capabilities.
+
+ ## Features
+
+ - **Structured Skills**: Build modular AI skills with defined input/output schemas
+ - **Multiple LLM Integrations**: Built-in support for OpenAI and Anthropic models
+ - **Structured Outputs**: Parse LLM responses into structured Pydantic models
+ - **Credential Management**: Secure handling of API keys and credentials
+ - **Type Safety**: Full type hints and Pydantic model support
+ - **Image Support**: Handle image inputs for multimodal models
+ - **Error Handling**: Robust error handling and logging
+
+ ## Installation
+
+ ```bash
+ pip install airtrain
+ ```
+
+ ## Quick Start
+
+ ### 1. Basic OpenAI Chat
+
+ ```python
+ from airtrain.integrations.openai.skills import OpenAIChatSkill, OpenAIInput
+
+ # Initialize the skill
+ skill = OpenAIChatSkill()
+
+ # Create input
+ input_data = OpenAIInput(
+     user_input="Explain quantum computing in simple terms.",
+     system_prompt="You are a helpful teacher.",
+     max_tokens=500,
+     temperature=0.7
+ )
+
+ # Get response
+ result = skill.process(input_data)
+ print(result.response)
+ print(f"Tokens Used: {result.usage['total_tokens']}")
+ ```
+
+ ### 2. Anthropic Claude Integration
+
+ ```python
+ from airtrain.integrations.anthropic.skills import AnthropicChatSkill, AnthropicInput
+
+ # Initialize the skill
+ skill = AnthropicChatSkill()
+
+ # Create input
+ input_data = AnthropicInput(
+     user_input="Explain the theory of relativity.",
+     system_prompt="You are a physics expert.",
+     model="claude-3-opus-20240229",
+     temperature=0.3
+ )
+
+ # Get response
+ result = skill.process(input_data)
+ print(result.response)
+ print(f"Usage: {result.usage}")
+ ```
+
+ ### 3. Structured Output with OpenAI
+
+ ```python
+ from pydantic import BaseModel
+ from typing import List
+ from airtrain.integrations.openai.skills import OpenAIParserSkill, OpenAIParserInput
+
+ # Define your response model
+ class PersonInfo(BaseModel):
+     name: str
+     age: int
+     occupation: str
+     skills: List[str]
+
+ # Initialize the parser skill
+ parser_skill = OpenAIParserSkill()
+
+ # Create input with response model
+ input_data = OpenAIParserInput(
+     user_input="Tell me about John Doe, a 30-year-old software engineer who specializes in Python and AI",
+     system_prompt="Extract structured information about the person.",
+     response_model=PersonInfo
+ )
+
+ # Get structured response
+ result = parser_skill.process(input_data)
+ person_info = result.parsed_response
+ print(f"Name: {person_info.name}")
+ print(f"Skills: {', '.join(person_info.skills)}")
+ ```
+
+ ## Error Handling
+
+ All skills include built-in error handling:
+
+ ```python
+ from airtrain.core.skills import ProcessingError
+
+ try:
+     result = skill.process(input_data)
+ except ProcessingError as e:
+     print(f"Processing failed: {e}")
+ ```
+
+ ## Advanced Features
+
+ - Image Analysis Support
+ - Function Calling
+ - Custom Validators
+ - Async Processing
+ - Token Usage Tracking
+
+ For more examples and detailed documentation, visit our [documentation](https://airtrain.readthedocs.io/).
+
+ ## Documentation
+
+ For detailed documentation, visit [our documentation site](https://docs.airtrain.dev/).
+
+ ## Contributing
+
+ Contributions are welcome! Please feel free to submit a Pull Request.
+
+ ## License
+
+ This project is licensed under the MIT License - see the LICENSE file for details.
airtrain-0.1.7.dist-info/RECORD ADDED
@@ -0,0 +1,17 @@
+ airtrain/__init__.py,sha256=yLOg9WK7J4d_hfIEikVhCwtkwImkWEEB9CZKqW5gP00,312
+ airtrain/core/__init__.py,sha256=9h7iKwTzZocCPc9bU6j8bA02BokteWIOcO1uaqGMcrk,254
+ airtrain/core/credentials.py,sha256=PgQotrQc46J5djidKnkK1znUv3fyNkUFDO-m2Kn_Gzo,4006
+ airtrain/core/schemas.py,sha256=MMXrDviC4gRea_QaPpbjgO--B_UKxnD7YrxqZOLJZZU,7003
+ airtrain/core/skills.py,sha256=LljalzeSHK5eQPTAOEAYc5D8Qn1kVSfiz9WgziTD5UM,4688
+ airtrain/integrations/__init__.py,sha256=PRKI_A-KE307C4lpXgFAsZA2oFtTl1kt_4CrRUF2rpU,832
+ airtrain/integrations/anthropic/__init__.py,sha256=qwlWLDh1rEVizYFbW8430z-f1SxHio7_Gaw5cCTUtoo,274
+ airtrain/integrations/anthropic/credentials.py,sha256=hlTSw9HX66kYNaeQUtn0JjdZQBMNkzzFOJOoLOOzvcY,1246
+ airtrain/integrations/anthropic/skills.py,sha256=sT7dBYPVCsICYjgBjUlyyP84A8h9OkbgkslvKHk3Tjs,5273
+ airtrain/integrations/openai/__init__.py,sha256=K-NY2_T1T6SEOgkpbUA55cWvK2nr2NOJgLCqmmtaCno,371
+ airtrain/integrations/openai/chinese_assistant.py,sha256=MMhv4NBOoEQ0O22ZZtP255rd5ajHC9l6FPWIjpqxBOA,1581
+ airtrain/integrations/openai/credentials.py,sha256=NfRyp1QgEtgm8cxt2-BOLq-6d0X-Pcm80NnfHM8p0FY,1470
+ airtrain/integrations/openai/skills.py,sha256=Olg9-6f_p2XgkVwwcB9tvjAMApmM2EK81i8LP4qVVvs,7676
+ airtrain-0.1.7.dist-info/METADATA,sha256=o5KFAwTPPMRmKgmeIyLnNnVkWxwUEfo_am15JCOu7hI,4380
+ airtrain-0.1.7.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ airtrain-0.1.7.dist-info/top_level.txt,sha256=cFWW1vY6VMCb3AGVdz6jBDpZ65xxBRSqlsPyySxTkxY,9
+ airtrain-0.1.7.dist-info/RECORD,,
airtrain-0.1.5.dist-info/METADATA DELETED
@@ -1,106 +0,0 @@
- Metadata-Version: 2.2
- Name: airtrain
- Version: 0.1.5
- Summary: A platform for building and deploying AI agents with structured skills
- Home-page: https://github.com/rosaboyle/airtrain.dev
- Author: Dheeraj Pai
- Author-email: helloworldcmu@gmail.com
- Classifier: Development Status :: 3 - Alpha
- Classifier: Intended Audience :: Developers
- Classifier: License :: OSI Approved :: MIT License
- Classifier: Operating System :: OS Independent
- Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.8
- Classifier: Programming Language :: Python :: 3.9
- Classifier: Programming Language :: Python :: 3.10
- Requires-Python: >=3.8
- Description-Content-Type: text/markdown
- Requires-Dist: pydantic>=2.0.0
- Requires-Dist: openai>=1.0.0
- Requires-Dist: python-dotenv>=0.19.0
- Requires-Dist: PyYAML>=5.4.1
- Requires-Dist: firebase-admin>=5.0.0
- Requires-Dist: loguru>=0.5.3
- Dynamic: author
- Dynamic: author-email
- Dynamic: classifier
- Dynamic: description
- Dynamic: description-content-type
- Dynamic: home-page
- Dynamic: requires-dist
- Dynamic: requires-python
- Dynamic: summary
-
- # Airtrain
-
- A powerful platform for building and deploying AI agents with structured skills and capabilities.
-
- ## Features
-
- - **Structured Skills**: Build modular AI skills with defined input/output schemas
- - **OpenAI Integration**: Built-in support for OpenAI's GPT models with structured outputs
- - **Credential Management**: Secure handling of API keys and credentials
- - **Type Safety**: Full type hints and Pydantic model support
- - **Async Support**: Both synchronous and asynchronous API implementations
-
- ## Installation
-
- ```bash
- pip install airtrain
- ```
-
- ## Quick Start
-
- ### Creating a Structured OpenAI Skill
-
- ```python
- from airtrain.core.skills import Skill
- from airtrain.core.schemas import InputSchema, OutputSchema
- from pydantic import BaseModel
- from typing import List
-
- # Define your response model
- class PersonInfo(BaseModel):
-     name: str
-     age: int
-     occupation: str
-     skills: List[str]
-
- # Create a skill
- class OpenAIParserSkill(Skill):
-     def process(self, input_data):
-         # Implementation
-         return parsed_response
-
- # Use the skill
- skill = OpenAIParserSkill()
- result = skill.process(input_data)
- ```
-
- ### Managing Credentials
-
- ```python
- from airtrain.core.credentials import OpenAICredentials
- from pathlib import Path
-
- # Load credentials
- creds = OpenAICredentials(
-     api_key="your-api-key",
-     organization_id="optional-org-id"
- )
-
- # Save to environment
- creds.load_to_env()
- ```
-
- ## Documentation
-
- For detailed documentation, visit [our documentation site](https://docs.airtrain.dev/).
-
- ## Contributing
-
- Contributions are welcome! Please feel free to submit a Pull Request.
-
- ## License
-
- This project is licensed under the MIT License - see the LICENSE file for details.
airtrain-0.1.5.dist-info/RECORD DELETED
@@ -1,10 +0,0 @@
- airtrain/__init__.py,sha256=eiw0x9Hie0dA0nrYtKSmD3XbW1KKOLr640Lu13DZFBs,312
- airtrain/core/__init__.py,sha256=9h7iKwTzZocCPc9bU6j8bA02BokteWIOcO1uaqGMcrk,254
- airtrain/core/credentials.py,sha256=PgQotrQc46J5djidKnkK1znUv3fyNkUFDO-m2Kn_Gzo,4006
- airtrain/core/schemas.py,sha256=MMXrDviC4gRea_QaPpbjgO--B_UKxnD7YrxqZOLJZZU,7003
- airtrain/core/skills.py,sha256=LljalzeSHK5eQPTAOEAYc5D8Qn1kVSfiz9WgziTD5UM,4688
- airtrain/integrations/__init__.py,sha256=PRKI_A-KE307C4lpXgFAsZA2oFtTl1kt_4CrRUF2rpU,832
- airtrain-0.1.5.dist-info/METADATA,sha256=YJiHLxGUOwqQvLlqis-JQrtmLcpeIP_11T_hn8pIFxg,2786
- airtrain-0.1.5.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- airtrain-0.1.5.dist-info/top_level.txt,sha256=cFWW1vY6VMCb3AGVdz6jBDpZ65xxBRSqlsPyySxTkxY,9
- airtrain-0.1.5.dist-info/RECORD,,