airtrain 0.1.3__tar.gz → 0.1.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. airtrain-0.1.6/.gitignore +183 -0
  2. airtrain-0.1.6/EXPERIMENTS/integrations_examples/anthropic_with_image.py +43 -0
  3. airtrain-0.1.6/PKG-INFO +164 -0
  4. airtrain-0.1.6/README.md +131 -0
  5. {airtrain-0.1.3 → airtrain-0.1.6}/airtrain/__init__.py +1 -1
  6. airtrain-0.1.6/airtrain/core/__pycache__/credentials.cpython-310.pyc +0 -0
  7. {airtrain-0.1.3 → airtrain-0.1.6}/airtrain/core/credentials.py +3 -31
  8. airtrain-0.1.6/airtrain/integrations/__init__.py +26 -0
  9. airtrain-0.1.6/airtrain/integrations/anthropic/credentials.py +32 -0
  10. airtrain-0.1.6/airtrain/integrations/anthropic/skills.py +135 -0
  11. airtrain-0.1.6/airtrain/integrations/aws/credentials.py +36 -0
  12. airtrain-0.1.6/airtrain/integrations/cerebras/credentials.py +22 -0
  13. airtrain-0.1.6/airtrain/integrations/google/credentials.py +27 -0
  14. airtrain-0.1.6/airtrain/integrations/groq/credentials.py +24 -0
  15. airtrain-0.1.6/airtrain/integrations/ollama/credentials.py +26 -0
  16. airtrain-0.1.6/airtrain/integrations/openai/chinese_assistant.py +42 -0
  17. airtrain-0.1.6/airtrain/integrations/openai/credentials.py +39 -0
  18. airtrain-0.1.6/airtrain/integrations/openai/skills.py +208 -0
  19. airtrain-0.1.6/airtrain/integrations/sambanova/credentials.py +20 -0
  20. airtrain-0.1.6/airtrain/integrations/together/credentials.py +22 -0
  21. airtrain-0.1.6/airtrain.egg-info/PKG-INFO +164 -0
  22. airtrain-0.1.6/airtrain.egg-info/SOURCES.txt +60 -0
  23. airtrain-0.1.6/examples/creating-skills/anthropic_skills_usage.py +56 -0
  24. airtrain-0.1.6/examples/creating-skills/chinese_anthropic_assistant.py +44 -0
  25. airtrain-0.1.6/examples/creating-skills/chinese_anthropic_usage.py +60 -0
  26. airtrain-0.1.6/examples/creating-skills/chinese_assistant_usage.py +45 -0
  27. airtrain-0.1.6/examples/creating-skills/icon128.png +0 -0
  28. airtrain-0.1.6/examples/creating-skills/icon16.png +0 -0
  29. {airtrain-0.1.3 → airtrain-0.1.6}/examples/creating-skills/openai_skills.py +6 -6
  30. airtrain-0.1.6/examples/creating-skills/openai_skills_usage.py +175 -0
  31. {airtrain-0.1.3 → airtrain-0.1.6}/examples/credentials_usage.py +0 -1
  32. airtrain-0.1.6/examples/images/quantum-circuit.png +0 -0
  33. airtrain-0.1.6/scripts/release.py +60 -0
  34. airtrain-0.1.3/PKG-INFO +0 -106
  35. airtrain-0.1.3/README.md +0 -73
  36. airtrain-0.1.3/airtrain/core/__pycache__/credentials.cpython-310.pyc +0 -0
  37. airtrain-0.1.3/airtrain.egg-info/PKG-INFO +0 -106
  38. airtrain-0.1.3/airtrain.egg-info/SOURCES.txt +0 -36
  39. {airtrain-0.1.3 → airtrain-0.1.6}/.flake8 +0 -0
  40. {airtrain-0.1.3 → airtrain-0.1.6}/.github/workflows/publish.yml +0 -0
  41. {airtrain-0.1.3 → airtrain-0.1.6}/.mypy.ini +0 -0
  42. {airtrain-0.1.3 → airtrain-0.1.6}/.pre-commit-config.yaml +0 -0
  43. {airtrain-0.1.3 → airtrain-0.1.6}/.vscode/extensions.json +0 -0
  44. {airtrain-0.1.3 → airtrain-0.1.6}/.vscode/launch.json +0 -0
  45. {airtrain-0.1.3 → airtrain-0.1.6}/.vscode/settings.json +0 -0
  46. {airtrain-0.1.3 → airtrain-0.1.6}/EXPERIMENTS/schema_exps/pydantic_schemas.py +0 -0
  47. {airtrain-0.1.3 → airtrain-0.1.6}/airtrain/core/__init__.py +0 -0
  48. {airtrain-0.1.3 → airtrain-0.1.6}/airtrain/core/__pycache__/schemas.cpython-310.pyc +0 -0
  49. {airtrain-0.1.3 → airtrain-0.1.6}/airtrain/core/__pycache__/skills.cpython-310.pyc +0 -0
  50. {airtrain-0.1.3 → airtrain-0.1.6}/airtrain/core/schemas.py +0 -0
  51. {airtrain-0.1.3 → airtrain-0.1.6}/airtrain/core/skills.py +0 -0
  52. {airtrain-0.1.3 → airtrain-0.1.6}/airtrain.egg-info/dependency_links.txt +0 -0
  53. {airtrain-0.1.3 → airtrain-0.1.6}/airtrain.egg-info/requires.txt +0 -0
  54. {airtrain-0.1.3 → airtrain-0.1.6}/airtrain.egg-info/top_level.txt +0 -0
  55. {airtrain-0.1.3 → airtrain-0.1.6}/examples/creating-skills/image1.jpg +0 -0
  56. {airtrain-0.1.3 → airtrain-0.1.6}/examples/creating-skills/image2.jpg +0 -0
  57. {airtrain-0.1.3 → airtrain-0.1.6}/examples/creating-skills/openai_structured_skills.py +0 -0
  58. {airtrain-0.1.3 → airtrain-0.1.6}/examples/schema_usage.py +0 -0
  59. {airtrain-0.1.3 → airtrain-0.1.6}/examples/skill_usage.py +0 -0
  60. {airtrain-0.1.3 → airtrain-0.1.6}/pyproject.toml +0 -0
  61. {airtrain-0.1.3 → airtrain-0.1.6}/scripts/build.sh +0 -0
  62. {airtrain-0.1.3 → airtrain-0.1.6}/scripts/bump_version.py +0 -0
  63. {airtrain-0.1.3 → airtrain-0.1.6}/scripts/publish.sh +0 -0
  64. {airtrain-0.1.3 → airtrain-0.1.6}/services/firebase_service.py +0 -0
  65. {airtrain-0.1.3 → airtrain-0.1.6}/services/openai_service.py +0 -0
  66. {airtrain-0.1.3 → airtrain-0.1.6}/setup.cfg +0 -0
  67. {airtrain-0.1.3 → airtrain-0.1.6}/setup.py +0 -0
@@ -0,0 +1,183 @@
+ package
+ .env
+ .mypy_cache
+ firebaseadmin.json
+ **pyc=
+ 50mb_test3.bin
+ *bin
+ **bin
+ token.pickle
+ temp_workspace
+ temp*
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ #uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+ .pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # PyPI configuration file
+ .pypirc
@@ -0,0 +1,43 @@
+ import anthropic
+
+ client = anthropic.Anthropic(
+     # defaults to os.environ.get("ANTHROPIC_API_KEY")
+     api_key="my_api_key",
+ )
+
+ # Replace placeholders like {{PR_DESCRIPTION}} with real values,
+ # because the SDK does not support variables.
+ message = client.messages.create(
+     model="claude-3-5-sonnet-20241022",
+     max_tokens=8192,
+     temperature=0,
+     system="You are an experienced software engineer tasked with reviewing a GitHub Pull Request (PR). Your goal is to analyze the code quality and suggest improvements. Follow these steps carefully:\n\n1. Review the PR description:\n<PR_DESCRIPTION>\n{{PR_DESCRIPTION}}\n</PR_DESCRIPTION>\n\n2. Examine the code changes:\n<CODE_CHANGES>\n{{CODE_CHANGES}}\n</CODE_CHANGES>\n\n3. Consider any existing comments:\n<EXISTING_COMMENTS>\n{{EXISTING_COMMENTS}}\n</EXISTING_COMMENTS>\n\n4. Analyze the code quality:\n a. Check for adherence to coding standards and best practices\n b. Evaluate code readability and maintainability\n c. Assess performance implications\n d. Look for potential bugs or edge cases\n e. Consider security implications\n\n5. Suggest improvements:\n a. Identify areas where the code can be optimized or simplified\n b. Propose alternative approaches if applicable\n c. Recommend additional tests or error handling if needed\n\n6. Format your response as follows:\n <code_review>\n <quality_analysis>\n Provide a detailed analysis of the code quality, addressing points 4a-4e.\n </quality_analysis>\n\n <improvement_suggestions>\n List your suggestions for improvement, addressing points 5a-5c. Number each suggestion.\n </improvement_suggestions>\n\n <summary>\n Provide a brief summary of your overall assessment and key recommendations.\n </summary>\n </code_review>\n\nRemember to be constructive and specific in your feedback. Use code snippets or pseudocode to illustrate your suggestions when appropriate. If you need clarification on any part of the code or PR description, state your assumptions clearly.\n\nDo not comment on aspects unrelated to code quality or potential improvements. Focus solely on the technical aspects of the code changes presented.",
+     messages=[
+         {
+             "role": "user",
+             "content": [
+                 {
+                     "type": "text",
+                     "text": "\nAnalyze the above examples and give me some updates. Analyze this image as well.\n\nOne more image is this. Can you image this as well.",
+                 },
+                 {
+                     "type": "image",
+                     "source": {
+                         "type": "base64",
+                         "media_type": "image/jpeg",
+                         "data": "<base64_encoded_image>",
+                     },
+                 },
+                 {
+                     "type": "image",
+                     "source": {
+                         "type": "base64",
+                         "media_type": "image/jpeg",
+                         "data": "<base64_encoded_image>",
+                     },
+                 },
+             ],
+         }
+     ],
+ )
+ print(message.content)
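
Note: the `data` fields above keep the `<base64_encoded_image>` placeholder from the packaged example. A minimal sketch of how such a payload is typically produced, assuming a local JPEG such as `examples/creating-skills/image1.jpg` from this package (the helper name `encode_image` is illustrative, not part of airtrain):

```python
import base64
from pathlib import Path


def encode_image(path: str) -> str:
    """Read an image file and return its contents as a base64 string."""
    return base64.b64encode(Path(path).read_bytes()).decode("utf-8")


# The resulting string can be placed in the "data" field of an image block.
image_data = encode_image("examples/creating-skills/image1.jpg")
```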
@@ -0,0 +1,164 @@
+ Metadata-Version: 2.2
+ Name: airtrain
+ Version: 0.1.6
+ Summary: A platform for building and deploying AI agents with structured skills
+ Home-page: https://github.com/rosaboyle/airtrain.dev
+ Author: Dheeraj Pai
+ Author-email: helloworldcmu@gmail.com
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ Requires-Dist: pydantic>=2.0.0
+ Requires-Dist: openai>=1.0.0
+ Requires-Dist: python-dotenv>=0.19.0
+ Requires-Dist: PyYAML>=5.4.1
+ Requires-Dist: firebase-admin>=5.0.0
+ Requires-Dist: loguru>=0.5.3
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary
+
+ # Airtrain
+
+ A powerful platform for building and deploying AI agents with structured skills and capabilities.
+
+ ## Features
+
+ - **Structured Skills**: Build modular AI skills with defined input/output schemas
+ - **Multiple LLM Integrations**: Built-in support for OpenAI and Anthropic models
+ - **Structured Outputs**: Parse LLM responses into structured Pydantic models
+ - **Credential Management**: Secure handling of API keys and credentials
+ - **Type Safety**: Full type hints and Pydantic model support
+ - **Image Support**: Handle image inputs for multimodal models
+ - **Error Handling**: Robust error handling and logging
+
+ ## Installation
+
+ ```bash
+ pip install airtrain
+ ```
+
+ ## Quick Start
+
+ ### 1. Basic OpenAI Chat
+
+ ```python
+ from airtrain.integrations.openai.skills import OpenAIChatSkill, OpenAIInput
+
+ # Initialize the skill
+ skill = OpenAIChatSkill()
+
+ # Create input
+ input_data = OpenAIInput(
+     user_input="Explain quantum computing in simple terms.",
+     system_prompt="You are a helpful teacher.",
+     max_tokens=500,
+     temperature=0.7
+ )
+
+ # Get response
+ result = skill.process(input_data)
+ print(result.response)
+ print(f"Tokens Used: {result.usage['total_tokens']}")
+ ```
+
+ ### 2. Anthropic Claude Integration
+
+ ```python
+ from airtrain.integrations.anthropic.skills import AnthropicChatSkill, AnthropicInput
+
+ # Initialize the skill
+ skill = AnthropicChatSkill()
+
+ # Create input
+ input_data = AnthropicInput(
+     user_input="Explain the theory of relativity.",
+     system_prompt="You are a physics expert.",
+     model="claude-3-opus-20240229",
+     temperature=0.3
+ )
+
+ # Get response
+ result = skill.process(input_data)
+ print(result.response)
+ print(f"Usage: {result.usage}")
+ ```
+
+ ### 3. Structured Output with OpenAI
+
+ ```python
+ from pydantic import BaseModel
+ from typing import List
+ from airtrain.integrations.openai.skills import OpenAIParserSkill, OpenAIParserInput
+
+ # Define your response model
+ class PersonInfo(BaseModel):
+     name: str
+     age: int
+     occupation: str
+     skills: List[str]
+
+ # Initialize the parser skill
+ parser_skill = OpenAIParserSkill()
+
+ # Create input with response model
+ input_data = OpenAIParserInput(
+     user_input="Tell me about John Doe, a 30-year-old software engineer who specializes in Python and AI",
+     system_prompt="Extract structured information about the person.",
+     response_model=PersonInfo
+ )
+
+ # Get structured response
+ result = parser_skill.process(input_data)
+ person_info = result.parsed_response
+ print(f"Name: {person_info.name}")
+ print(f"Skills: {', '.join(person_info.skills)}")
+ ```
+
+ ## Error Handling
+
+ All skills include built-in error handling:
+
+ ```python
+ from airtrain.core.skills import ProcessingError
+
+ try:
+     result = skill.process(input_data)
+ except ProcessingError as e:
+     print(f"Processing failed: {e}")
+ ```
+
+ ## Advanced Features
+
+ - Image Analysis Support
+ - Function Calling
+ - Custom Validators
+ - Async Processing
+ - Token Usage Tracking
+
+ For more examples and detailed documentation, visit our [documentation](https://airtrain.readthedocs.io/).
+
+ ## Documentation
+
+ For detailed documentation, visit [our documentation site](https://docs.airtrain.dev/).
+
+ ## Contributing
+
+ Contributions are welcome! Please feel free to submit a Pull Request.
+
+ ## License
+
+ This project is licensed under the MIT License - see the LICENSE file for details.
@@ -0,0 +1,131 @@
+ # Airtrain
+
+ A powerful platform for building and deploying AI agents with structured skills and capabilities.
+
+ ## Features
+
+ - **Structured Skills**: Build modular AI skills with defined input/output schemas
+ - **Multiple LLM Integrations**: Built-in support for OpenAI and Anthropic models
+ - **Structured Outputs**: Parse LLM responses into structured Pydantic models
+ - **Credential Management**: Secure handling of API keys and credentials
+ - **Type Safety**: Full type hints and Pydantic model support
+ - **Image Support**: Handle image inputs for multimodal models
+ - **Error Handling**: Robust error handling and logging
+
+ ## Installation
+
+ ```bash
+ pip install airtrain
+ ```
+
+ ## Quick Start
+
+ ### 1. Basic OpenAI Chat
+
+ ```python
+ from airtrain.integrations.openai.skills import OpenAIChatSkill, OpenAIInput
+
+ # Initialize the skill
+ skill = OpenAIChatSkill()
+
+ # Create input
+ input_data = OpenAIInput(
+     user_input="Explain quantum computing in simple terms.",
+     system_prompt="You are a helpful teacher.",
+     max_tokens=500,
+     temperature=0.7
+ )
+
+ # Get response
+ result = skill.process(input_data)
+ print(result.response)
+ print(f"Tokens Used: {result.usage['total_tokens']}")
+ ```
+
+ ### 2. Anthropic Claude Integration
+
+ ```python
+ from airtrain.integrations.anthropic.skills import AnthropicChatSkill, AnthropicInput
+
+ # Initialize the skill
+ skill = AnthropicChatSkill()
+
+ # Create input
+ input_data = AnthropicInput(
+     user_input="Explain the theory of relativity.",
+     system_prompt="You are a physics expert.",
+     model="claude-3-opus-20240229",
+     temperature=0.3
+ )
+
+ # Get response
+ result = skill.process(input_data)
+ print(result.response)
+ print(f"Usage: {result.usage}")
+ ```
+
+ ### 3. Structured Output with OpenAI
+
+ ```python
+ from pydantic import BaseModel
+ from typing import List
+ from airtrain.integrations.openai.skills import OpenAIParserSkill, OpenAIParserInput
+
+ # Define your response model
+ class PersonInfo(BaseModel):
+     name: str
+     age: int
+     occupation: str
+     skills: List[str]
+
+ # Initialize the parser skill
+ parser_skill = OpenAIParserSkill()
+
+ # Create input with response model
+ input_data = OpenAIParserInput(
+     user_input="Tell me about John Doe, a 30-year-old software engineer who specializes in Python and AI",
+     system_prompt="Extract structured information about the person.",
+     response_model=PersonInfo
+ )
+
+ # Get structured response
+ result = parser_skill.process(input_data)
+ person_info = result.parsed_response
+ print(f"Name: {person_info.name}")
+ print(f"Skills: {', '.join(person_info.skills)}")
+ ```
+
+ ## Error Handling
+
+ All skills include built-in error handling:
+
+ ```python
+ from airtrain.core.skills import ProcessingError
+
+ try:
+     result = skill.process(input_data)
+ except ProcessingError as e:
+     print(f"Processing failed: {e}")
+ ```
+
+ ## Advanced Features
+
+ - Image Analysis Support
+ - Function Calling
+ - Custom Validators
+ - Async Processing
+ - Token Usage Tracking
+
+ For more examples and detailed documentation, visit our [documentation](https://airtrain.readthedocs.io/).
+
+ ## Documentation
+
+ For detailed documentation, visit [our documentation site](https://docs.airtrain.dev/).
+
+ ## Contributing
+
+ Contributions are welcome! Please feel free to submit a Pull Request.
+
+ ## License
+
+ This project is licensed under the MIT License - see the LICENSE file for details.
@@ -1,6 +1,6 @@
  """Airtrain - A platform for building and deploying AI agents with structured skills"""

- __version__ = "0.1.3"
+ __version__ = "0.1.6"

  from .core.skills import Skill
  from .core.schemas import InputSchema, OutputSchema
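
The only change to `airtrain/__init__.py` is the version bump. A quick sanity check after upgrading (a sketch; assumes airtrain 0.1.6 is installed):

```python
import airtrain

# Expected to print "0.1.6" once the upgrade is installed.
print(airtrain.__version__)
```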
@@ -5,7 +5,7 @@ from pathlib import Path
  from abc import ABC, abstractmethod
  import dotenv
  from pydantic import BaseModel, Field, SecretStr
- import yaml # type: ignore
+ import yaml


  class CredentialError(Exception):
@@ -100,7 +100,7 @@ class BaseCredentials(BaseModel):
          else:
              raise ValueError(f"Unsupported file format: {file_path.suffix}")

-     def validate_credentials(self) -> None:
+     async def validate_credentials(self) -> bool:
          """Validate that all required credentials are present"""
          missing = []
          for field_name in self._required_credentials:
@@ -114,6 +114,7 @@ class BaseCredentials(BaseModel):
              raise CredentialValidationError(
                  f"Missing required credentials: {', '.join(missing)}"
              )
+         return True

      def clear_from_env(self) -> None:
          """Remove credentials from environment variables"""
@@ -122,32 +123,3 @@ class BaseCredentials(BaseModel):
              if env_key in os.environ:
                  del os.environ[env_key]
          self._loaded = False
-
-
- class OpenAICredentials(BaseCredentials):
-     """OpenAI API credentials"""
-
-     api_key: SecretStr = Field(..., description="OpenAI API key")
-     organization_id: Optional[str] = Field(None, description="OpenAI organization ID")
-
-     _required_credentials = {"api_key"}
-
-
- class AWSCredentials(BaseCredentials):
-     """AWS credentials"""
-
-     aws_access_key_id: SecretStr
-     aws_secret_access_key: SecretStr
-     aws_region: str = "us-east-1"
-     aws_session_token: Optional[SecretStr] = None
-
-     _required_credentials = {"aws_access_key_id", "aws_secret_access_key"}
-
-
- class GoogleCloudCredentials(BaseCredentials):
-     """Google Cloud credentials"""
-
-     project_id: str
-     service_account_key: SecretStr
-
-     _required_credentials = {"project_id", "service_account_key"}
@@ -0,0 +1,26 @@
+ """Airtrain integrations package"""
+
+ from .openai.credentials import OpenAICredentials
+ from .aws.credentials import AWSCredentials
+ from .google.credentials import GoogleCloudCredentials
+ from .anthropic.credentials import AnthropicCredentials
+ from .groq.credentials import GroqCredentials
+ from .together.credentials import TogetherAICredentials
+ from .ollama.credentials import OllamaCredentials
+ from .sambanova.credentials import SambanovaCredentials
+ from .cerebras.credentials import CerebrasCredentials
+
+ from .anthropic.skills import AnthropicChatSkill
+
+ __all__ = [
+     "OpenAICredentials",
+     "AWSCredentials",
+     "GoogleCloudCredentials",
+     "AnthropicCredentials",
+     "AnthropicChatSkill",
+     "GroqCredentials",
+     "TogetherAICredentials",
+     "OllamaCredentials",
+     "SambanovaCredentials",
+     "CerebrasCredentials",
+ ]
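
The provider-specific credential classes removed from `airtrain/core/credentials.py` above now live in per-provider integration modules and are re-exported here, so imports move to `airtrain.integrations`. A sketch using the `AnthropicCredentials` fields shown later in this diff (the key value is a placeholder); the other exported classes are imported the same way:

```python
# Old location (removed in 0.1.6): airtrain.core.credentials
# New location: airtrain.integrations
from airtrain.integrations import AnthropicCredentials

creds = AnthropicCredentials(anthropic_api_key="sk-ant-your-key-here")
```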
@@ -0,0 +1,32 @@
+ from pydantic import Field, SecretStr, validator
+ from airtrain.core.credentials import BaseCredentials, CredentialValidationError
+ from anthropic import Anthropic
+
+
+ class AnthropicCredentials(BaseCredentials):
+     """Anthropic API credentials"""
+
+     anthropic_api_key: SecretStr = Field(..., description="Anthropic API key")
+     version: str = Field(default="2023-06-01", description="API Version")
+
+     _required_credentials = {"anthropic_api_key"}
+
+     @validator("anthropic_api_key")
+     def validate_api_key_format(cls, v: SecretStr) -> SecretStr:
+         key = v.get_secret_value()
+         if not key.startswith("sk-ant-"):
+             raise ValueError("Anthropic API key must start with 'sk-ant-'")
+         return v
+
+     async def validate_credentials(self) -> bool:
+         """Validate Anthropic credentials"""
+         try:
+             client = Anthropic(api_key=self.anthropic_api_key.get_secret_value())
+             client.messages.create(
+                 model="claude-3-opus-20240229",
+                 max_tokens=1,
+                 messages=[{"role": "user", "content": "Hi"}],
+             )
+             return True
+         except Exception as e:
+             raise CredentialValidationError(f"Invalid Anthropic credentials: {str(e)}")
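
`AnthropicCredentials.validate_credentials` issues a one-token request to the Messages API, so it needs a real key and network access. A hedged usage sketch (the key below is a placeholder, chosen only to satisfy the `sk-ant-` format check):

```python
import asyncio

from airtrain.core.credentials import CredentialValidationError
from airtrain.integrations.anthropic.credentials import AnthropicCredentials


async def main() -> None:
    creds = AnthropicCredentials(anthropic_api_key="sk-ant-your-key-here")
    try:
        await creds.validate_credentials()  # makes a 1-token test call
        print("Anthropic credentials are valid")
    except CredentialValidationError as exc:
        print(f"Validation failed: {exc}")


asyncio.run(main())
```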