airtrain 0.1.6__tar.gz → 0.1.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {airtrain-0.1.6/airtrain.egg-info → airtrain-0.1.8}/PKG-INFO +1 -1
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/__init__.py +1 -1
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/integrations/__init__.py +22 -1
- airtrain-0.1.8/airtrain/integrations/anthropic/__init__.py +11 -0
- airtrain-0.1.8/airtrain/integrations/aws/__init__.py +6 -0
- airtrain-0.1.8/airtrain/integrations/aws/skills.py +98 -0
- airtrain-0.1.8/airtrain/integrations/cerebras/__init__.py +6 -0
- airtrain-0.1.8/airtrain/integrations/cerebras/skills.py +41 -0
- airtrain-0.1.8/airtrain/integrations/google/__init__.py +6 -0
- airtrain-0.1.8/airtrain/integrations/google/skills.py +41 -0
- airtrain-0.1.8/airtrain/integrations/groq/__init__.py +6 -0
- airtrain-0.1.8/airtrain/integrations/groq/skills.py +41 -0
- airtrain-0.1.8/airtrain/integrations/ollama/__init__.py +6 -0
- airtrain-0.1.8/airtrain/integrations/ollama/skills.py +41 -0
- airtrain-0.1.8/airtrain/integrations/openai/__init__.py +19 -0
- airtrain-0.1.8/airtrain/integrations/sambanova/__init__.py +6 -0
- airtrain-0.1.8/airtrain/integrations/sambanova/skills.py +41 -0
- airtrain-0.1.8/airtrain/integrations/together/__init__.py +6 -0
- airtrain-0.1.8/airtrain/integrations/together/skills.py +43 -0
- {airtrain-0.1.6 → airtrain-0.1.8/airtrain.egg-info}/PKG-INFO +1 -1
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain.egg-info/SOURCES.txt +16 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/examples/creating-skills/chinese_anthropic_assistant.py +12 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/.flake8 +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/.github/workflows/publish.yml +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/.gitignore +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/.mypy.ini +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/.pre-commit-config.yaml +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/.vscode/extensions.json +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/.vscode/launch.json +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/.vscode/settings.json +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/EXPERIMENTS/integrations_examples/anthropic_with_image.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/EXPERIMENTS/schema_exps/pydantic_schemas.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/README.md +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/core/__init__.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/core/__pycache__/credentials.cpython-310.pyc +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/core/__pycache__/schemas.cpython-310.pyc +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/core/__pycache__/skills.cpython-310.pyc +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/core/credentials.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/core/schemas.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/core/skills.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/integrations/anthropic/credentials.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/integrations/anthropic/skills.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/integrations/aws/credentials.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/integrations/cerebras/credentials.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/integrations/google/credentials.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/integrations/groq/credentials.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/integrations/ollama/credentials.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/integrations/openai/chinese_assistant.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/integrations/openai/credentials.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/integrations/openai/skills.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/integrations/sambanova/credentials.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain/integrations/together/credentials.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain.egg-info/dependency_links.txt +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain.egg-info/requires.txt +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/airtrain.egg-info/top_level.txt +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/examples/creating-skills/anthropic_skills_usage.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/examples/creating-skills/chinese_anthropic_usage.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/examples/creating-skills/chinese_assistant_usage.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/examples/creating-skills/icon128.png +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/examples/creating-skills/icon16.png +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/examples/creating-skills/image1.jpg +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/examples/creating-skills/image2.jpg +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/examples/creating-skills/openai_skills.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/examples/creating-skills/openai_skills_usage.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/examples/creating-skills/openai_structured_skills.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/examples/credentials_usage.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/examples/images/quantum-circuit.png +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/examples/schema_usage.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/examples/skill_usage.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/pyproject.toml +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/scripts/build.sh +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/scripts/bump_version.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/scripts/publish.sh +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/scripts/release.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/services/firebase_service.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/services/openai_service.py +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/setup.cfg +0 -0
- {airtrain-0.1.6 → airtrain-0.1.8}/setup.py +0 -0
airtrain/integrations/__init__.py
@@ -1,5 +1,6 @@
 """Airtrain integrations package"""
 
+# Credentials imports
 from .openai.credentials import OpenAICredentials
 from .aws.credentials import AWSCredentials
 from .google.credentials import GoogleCloudCredentials
@@ -10,17 +11,37 @@ from .ollama.credentials import OllamaCredentials
 from .sambanova.credentials import SambanovaCredentials
 from .cerebras.credentials import CerebrasCredentials
 
+# Skills imports
+from .openai.skills import OpenAIChatSkill, OpenAIParserSkill
 from .anthropic.skills import AnthropicChatSkill
+from .aws.skills import AWSBedrockSkill
+from .google.skills import VertexAISkill
+from .groq.skills import GroqChatSkill
+from .together.skills import TogetherAIChatSkill
+from .ollama.skills import OllamaChatSkill
+from .sambanova.skills import SambanovaChatSkill
+from .cerebras.skills import CerebrasChatSkill
 
 __all__ = [
+    # Credentials
     "OpenAICredentials",
     "AWSCredentials",
     "GoogleCloudCredentials",
     "AnthropicCredentials",
-    "AnthropicChatSkill",
     "GroqCredentials",
     "TogetherAICredentials",
     "OllamaCredentials",
     "SambanovaCredentials",
     "CerebrasCredentials",
+    # Skills
+    "OpenAIChatSkill",
+    "OpenAIParserSkill",
+    "AnthropicChatSkill",
+    "AWSBedrockSkill",
+    "VertexAISkill",
+    "GroqChatSkill",
+    "TogetherAIChatSkill",
+    "OllamaChatSkill",
+    "SambanovaChatSkill",
+    "CerebrasChatSkill",
 ]
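With this change, every skill class is re-exported at the airtrain.integrations level. A minimal import sketch of what that enables, assuming the package and the provider SDKs it imports are installed (the AWSBedrockSkillDirect alias below is only for illustration):

from airtrain.integrations import OpenAIChatSkill, AnthropicChatSkill, AWSBedrockSkill

# The top-level names are re-exports of the provider subpackages, so both forms refer to the same class object:
from airtrain.integrations.aws.skills import AWSBedrockSkill as AWSBedrockSkillDirect
assert AWSBedrockSkill is AWSBedrockSkillDirect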
airtrain/integrations/anthropic/__init__.py
@@ -0,0 +1,11 @@
+"""Anthropic integration for Airtrain"""
+
+from .credentials import AnthropicCredentials
+from .skills import AnthropicChatSkill, AnthropicInput, AnthropicOutput
+
+__all__ = [
+    "AnthropicCredentials",
+    "AnthropicChatSkill",
+    "AnthropicInput",
+    "AnthropicOutput",
+]
airtrain/integrations/aws/skills.py
@@ -0,0 +1,98 @@
+from typing import List, Optional, Dict, Any
+from pydantic import Field
+import boto3
+from pathlib import Path
+from loguru import logger
+
+from airtrain.core.skills import Skill, ProcessingError
+from airtrain.core.schemas import InputSchema, OutputSchema
+from .credentials import AWSCredentials
+
+
+class AWSBedrockInput(InputSchema):
+    """Schema for AWS Bedrock chat input"""
+
+    user_input: str = Field(..., description="User's input text")
+    system_prompt: str = Field(
+        default="You are a helpful assistant.",
+        description="System prompt to guide the model's behavior",
+    )
+    model: str = Field(
+        default="anthropic.claude-3-sonnet-20240229-v1:0",
+        description="AWS Bedrock model to use",
+    )
+    max_tokens: int = Field(default=1024, description="Maximum tokens in response")
+    temperature: float = Field(
+        default=0.7, description="Temperature for response generation", ge=0, le=1
+    )
+    images: Optional[List[Path]] = Field(
+        default=None,
+        description="Optional list of image paths to include in the message",
+    )
+
+
+class AWSBedrockOutput(OutputSchema):
+    """Schema for AWS Bedrock chat output"""
+
+    response: str = Field(..., description="Model's response text")
+    used_model: str = Field(..., description="Model used for generation")
+    usage: Dict[str, Any] = Field(
+        default_factory=dict, description="Usage statistics from the API"
+    )
+
+
+class AWSBedrockSkill(Skill[AWSBedrockInput, AWSBedrockOutput]):
+    """Skill for interacting with AWS Bedrock models"""
+
+    input_schema = AWSBedrockInput
+    output_schema = AWSBedrockOutput
+
+    def __init__(self, credentials: Optional[AWSCredentials] = None):
+        """Initialize the skill with optional credentials"""
+        super().__init__()
+        self.credentials = credentials or AWSCredentials.from_env()
+        self.client = boto3.client(
+            "bedrock-runtime",
+            aws_access_key_id=self.credentials.aws_access_key_id.get_secret_value(),
+            aws_secret_access_key=self.credentials.aws_secret_access_key.get_secret_value(),
+            region_name=self.credentials.aws_region,
+        )
+
+    def process(self, input_data: AWSBedrockInput) -> AWSBedrockOutput:
+        """Process the input using AWS Bedrock API"""
+        try:
+            logger.info(f"Processing request with model {input_data.model}")
+
+            # Prepare request body based on model provider
+            if "anthropic" in input_data.model:
+                request_body = {
+                    "anthropic_version": "bedrock-2023-05-31",
+                    "max_tokens": input_data.max_tokens,
+                    "temperature": input_data.temperature,
+                    "system": input_data.system_prompt,
+                    "messages": [{"role": "user", "content": input_data.user_input}],
+                }
+            else:
+                raise ProcessingError(f"Unsupported model: {input_data.model}")
+
+            response = self.client.invoke_model(
+                modelId=input_data.model, body=request_body
+            )
+
+            # Parse response based on model provider
+            if "anthropic" in input_data.model:
+                response_data = response["body"]["completion"]
+                usage = {
+                    "input_tokens": response["body"]["usage"]["input_tokens"],
+                    "output_tokens": response["body"]["usage"]["output_tokens"],
+                }
+            else:
+                raise ProcessingError(f"Unsupported model response: {input_data.model}")
+
+            return AWSBedrockOutput(
+                response=response_data, used_model=input_data.model, usage=usage
+            )
+
+        except Exception as e:
+            logger.exception(f"AWS Bedrock processing failed: {str(e)}")
+            raise ProcessingError(f"AWS Bedrock processing failed: {str(e)}")
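A hedged usage sketch for the new AWSBedrockSkill, based only on the schemas and constructor shown in the hunk above; the prompt text is illustrative, and it assumes AWS credentials can be resolved via AWSCredentials.from_env():

from airtrain.integrations.aws.skills import AWSBedrockSkill, AWSBedrockInput

skill = AWSBedrockSkill()  # no explicit credentials: falls back to AWSCredentials.from_env()
output = skill.process(
    AWSBedrockInput(
        user_input="Give me a one-line summary of AWS Bedrock.",  # illustrative prompt
        max_tokens=256,
    )
)
print(output.response)    # model text parsed from the Bedrock response
print(output.used_model)  # "anthropic.claude-3-sonnet-20240229-v1:0" by default
print(output.usage)       # input/output token counts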
airtrain/integrations/cerebras/skills.py
@@ -0,0 +1,41 @@
+from typing import Optional, Dict, Any
+from pydantic import Field
+from airtrain.core.skills import Skill, ProcessingError
+from airtrain.core.schemas import InputSchema, OutputSchema
+from .credentials import CerebrasCredentials
+
+
+class CerebrasInput(InputSchema):
+    """Schema for Cerebras input"""
+
+    user_input: str = Field(..., description="User's input text")
+    system_prompt: str = Field(
+        default="You are a helpful assistant.",
+        description="System prompt to guide the model's behavior",
+    )
+    model: str = Field(default="cerebras-gpt", description="Cerebras model to use")
+    max_tokens: int = Field(default=1024, description="Maximum tokens in response")
+    temperature: float = Field(
+        default=0.7, description="Temperature for response generation", ge=0, le=1
+    )
+
+
+class CerebrasOutput(OutputSchema):
+    """Schema for Cerebras output"""
+
+    response: str = Field(..., description="Model's response text")
+    used_model: str = Field(..., description="Model used for generation")
+    usage: Dict[str, Any] = Field(default_factory=dict, description="Usage statistics")
+
+
+class CerebrasChatSkill(Skill[CerebrasInput, CerebrasOutput]):
+    """Skill for Cerebras - Not Implemented"""
+
+    input_schema = CerebrasInput
+    output_schema = CerebrasOutput
+
+    def __init__(self, credentials: Optional[CerebrasCredentials] = None):
+        raise NotImplementedError("CerebrasChatSkill is not implemented yet")
+
+    def process(self, input_data: CerebrasInput) -> CerebrasOutput:
+        raise NotImplementedError("CerebrasChatSkill is not implemented yet")
airtrain/integrations/google/skills.py
@@ -0,0 +1,41 @@
+from typing import Optional, Dict, Any
+from pydantic import Field
+from airtrain.core.skills import Skill, ProcessingError
+from airtrain.core.schemas import InputSchema, OutputSchema
+from .credentials import GoogleCloudCredentials
+
+
+class VertexAIInput(InputSchema):
+    """Schema for Google Vertex AI input"""
+
+    user_input: str = Field(..., description="User's input text")
+    system_prompt: str = Field(
+        default="You are a helpful assistant.",
+        description="System prompt to guide the model's behavior",
+    )
+    model: str = Field(default="text-bison", description="Vertex AI model to use")
+    max_tokens: int = Field(default=1024, description="Maximum tokens in response")
+    temperature: float = Field(
+        default=0.7, description="Temperature for response generation", ge=0, le=1
+    )
+
+
+class VertexAIOutput(OutputSchema):
+    """Schema for Vertex AI output"""
+
+    response: str = Field(..., description="Model's response text")
+    used_model: str = Field(..., description="Model used for generation")
+    usage: Dict[str, Any] = Field(default_factory=dict, description="Usage statistics")
+
+
+class VertexAISkill(Skill[VertexAIInput, VertexAIOutput]):
+    """Skill for Google Vertex AI - Not Implemented"""
+
+    input_schema = VertexAIInput
+    output_schema = VertexAIOutput
+
+    def __init__(self, credentials: Optional[GoogleCloudCredentials] = None):
+        raise NotImplementedError("VertexAISkill is not implemented yet")
+
+    def process(self, input_data: VertexAIInput) -> VertexAIOutput:
+        raise NotImplementedError("VertexAISkill is not implemented yet")
airtrain/integrations/groq/skills.py
@@ -0,0 +1,41 @@
+from typing import Optional, Dict, Any
+from pydantic import Field
+from airtrain.core.skills import Skill, ProcessingError
+from airtrain.core.schemas import InputSchema, OutputSchema
+from .credentials import GroqCredentials
+
+
+class GroqInput(InputSchema):
+    """Schema for Groq input"""
+
+    user_input: str = Field(..., description="User's input text")
+    system_prompt: str = Field(
+        default="You are a helpful assistant.",
+        description="System prompt to guide the model's behavior",
+    )
+    model: str = Field(default="mixtral-8x7b", description="Groq model to use")
+    max_tokens: int = Field(default=1024, description="Maximum tokens in response")
+    temperature: float = Field(
+        default=0.7, description="Temperature for response generation", ge=0, le=1
+    )
+
+
+class GroqOutput(OutputSchema):
+    """Schema for Groq output"""
+
+    response: str = Field(..., description="Model's response text")
+    used_model: str = Field(..., description="Model used for generation")
+    usage: Dict[str, Any] = Field(default_factory=dict, description="Usage statistics")
+
+
+class GroqChatSkill(Skill[GroqInput, GroqOutput]):
+    """Skill for Groq - Not Implemented"""
+
+    input_schema = GroqInput
+    output_schema = GroqOutput
+
+    def __init__(self, credentials: Optional[GroqCredentials] = None):
+        raise NotImplementedError("GroqChatSkill is not implemented yet")
+
+    def process(self, input_data: GroqInput) -> GroqOutput:
+        raise NotImplementedError("GroqChatSkill is not implemented yet")
airtrain/integrations/ollama/skills.py
@@ -0,0 +1,41 @@
+from typing import Optional, Dict, Any
+from pydantic import Field
+from airtrain.core.skills import Skill, ProcessingError
+from airtrain.core.schemas import InputSchema, OutputSchema
+from .credentials import OllamaCredentials
+
+
+class OllamaInput(InputSchema):
+    """Schema for Ollama input"""
+
+    user_input: str = Field(..., description="User's input text")
+    system_prompt: str = Field(
+        default="You are a helpful assistant.",
+        description="System prompt to guide the model's behavior",
+    )
+    model: str = Field(default="llama2", description="Ollama model to use")
+    max_tokens: int = Field(default=1024, description="Maximum tokens in response")
+    temperature: float = Field(
+        default=0.7, description="Temperature for response generation", ge=0, le=1
+    )
+
+
+class OllamaOutput(OutputSchema):
+    """Schema for Ollama output"""
+
+    response: str = Field(..., description="Model's response text")
+    used_model: str = Field(..., description="Model used for generation")
+    usage: Dict[str, Any] = Field(default_factory=dict, description="Usage statistics")
+
+
+class OllamaChatSkill(Skill[OllamaInput, OllamaOutput]):
+    """Skill for Ollama - Not Implemented"""
+
+    input_schema = OllamaInput
+    output_schema = OllamaOutput
+
+    def __init__(self, credentials: Optional[OllamaCredentials] = None):
+        raise NotImplementedError("OllamaChatSkill is not implemented yet")
+
+    def process(self, input_data: OllamaInput) -> OllamaOutput:
+        raise NotImplementedError("OllamaChatSkill is not implemented yet")
airtrain/integrations/openai/__init__.py
@@ -0,0 +1,19 @@
+from .skills import (
+    OpenAIChatSkill,
+    OpenAIInput,
+    OpenAIParserSkill,
+    OpenAIOutput,
+    OpenAIParserInput,
+    OpenAIParserOutput,
+)
+from .credentials import OpenAICredentials
+
+__all__ = [
+    "OpenAIChatSkill",
+    "OpenAIInput",
+    "OpenAIParserSkill",
+    "OpenAIParserInput",
+    "OpenAIParserOutput",
+    "OpenAICredentials",
+    "OpenAIOutput",
+]
airtrain/integrations/sambanova/skills.py
@@ -0,0 +1,41 @@
+from typing import Optional, Dict, Any
+from pydantic import Field
+from airtrain.core.skills import Skill, ProcessingError
+from airtrain.core.schemas import InputSchema, OutputSchema
+from .credentials import SambanovaCredentials
+
+
+class SambanovaInput(InputSchema):
+    """Schema for Sambanova input"""
+
+    user_input: str = Field(..., description="User's input text")
+    system_prompt: str = Field(
+        default="You are a helpful assistant.",
+        description="System prompt to guide the model's behavior",
+    )
+    model: str = Field(default="sambanova-llm", description="Sambanova model to use")
+    max_tokens: int = Field(default=1024, description="Maximum tokens in response")
+    temperature: float = Field(
+        default=0.7, description="Temperature for response generation", ge=0, le=1
+    )
+
+
+class SambanovaOutput(OutputSchema):
+    """Schema for Sambanova output"""
+
+    response: str = Field(..., description="Model's response text")
+    used_model: str = Field(..., description="Model used for generation")
+    usage: Dict[str, Any] = Field(default_factory=dict, description="Usage statistics")
+
+
+class SambanovaChatSkill(Skill[SambanovaInput, SambanovaOutput]):
+    """Skill for Sambanova - Not Implemented"""
+
+    input_schema = SambanovaInput
+    output_schema = SambanovaOutput
+
+    def __init__(self, credentials: Optional[SambanovaCredentials] = None):
+        raise NotImplementedError("SambanovaChatSkill is not implemented yet")
+
+    def process(self, input_data: SambanovaInput) -> SambanovaOutput:
+        raise NotImplementedError("SambanovaChatSkill is not implemented yet")
airtrain/integrations/together/skills.py
@@ -0,0 +1,43 @@
+from typing import Optional, Dict, Any
+from pydantic import Field
+from airtrain.core.skills import Skill, ProcessingError
+from airtrain.core.schemas import InputSchema, OutputSchema
+from .credentials import TogetherAICredentials
+
+
+class TogetherAIInput(InputSchema):
+    """Schema for Together AI input"""
+
+    user_input: str = Field(..., description="User's input text")
+    system_prompt: str = Field(
+        default="You are a helpful assistant.",
+        description="System prompt to guide the model's behavior",
+    )
+    model: str = Field(
+        default="togethercomputer/llama-2-70b", description="Together AI model to use"
+    )
+    max_tokens: int = Field(default=1024, description="Maximum tokens in response")
+    temperature: float = Field(
+        default=0.7, description="Temperature for response generation", ge=0, le=1
+    )
+
+
+class TogetherAIOutput(OutputSchema):
+    """Schema for Together AI output"""
+
+    response: str = Field(..., description="Model's response text")
+    used_model: str = Field(..., description="Model used for generation")
+    usage: Dict[str, Any] = Field(default_factory=dict, description="Usage statistics")
+
+
+class TogetherAIChatSkill(Skill[TogetherAIInput, TogetherAIOutput]):
+    """Skill for Together AI - Not Implemented"""
+
+    input_schema = TogetherAIInput
+    output_schema = TogetherAIOutput
+
+    def __init__(self, credentials: Optional[TogetherAICredentials] = None):
+        raise NotImplementedError("TogetherAIChatSkill is not implemented yet")
+
+    def process(self, input_data: TogetherAIInput) -> TogetherAIOutput:
+        raise NotImplementedError("TogetherAIChatSkill is not implemented yet")
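Note that the Cerebras, Vertex AI, Groq, Ollama, Sambanova, and Together AI skills added in this release are placeholders: both __init__ and process raise NotImplementedError. A minimal sketch of the behavior to expect when instantiating any of them (Together AI shown here as an example):

from airtrain.integrations.together.skills import TogetherAIChatSkill

try:
    TogetherAIChatSkill()
except NotImplementedError as exc:
    print(exc)  # "TogetherAIChatSkill is not implemented yet"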
airtrain.egg-info/SOURCES.txt
@@ -25,18 +25,34 @@ airtrain/core/__pycache__/credentials.cpython-310.pyc
 airtrain/core/__pycache__/schemas.cpython-310.pyc
 airtrain/core/__pycache__/skills.cpython-310.pyc
 airtrain/integrations/__init__.py
+airtrain/integrations/anthropic/__init__.py
 airtrain/integrations/anthropic/credentials.py
 airtrain/integrations/anthropic/skills.py
+airtrain/integrations/aws/__init__.py
 airtrain/integrations/aws/credentials.py
+airtrain/integrations/aws/skills.py
+airtrain/integrations/cerebras/__init__.py
 airtrain/integrations/cerebras/credentials.py
+airtrain/integrations/cerebras/skills.py
+airtrain/integrations/google/__init__.py
 airtrain/integrations/google/credentials.py
+airtrain/integrations/google/skills.py
+airtrain/integrations/groq/__init__.py
 airtrain/integrations/groq/credentials.py
+airtrain/integrations/groq/skills.py
+airtrain/integrations/ollama/__init__.py
 airtrain/integrations/ollama/credentials.py
+airtrain/integrations/ollama/skills.py
+airtrain/integrations/openai/__init__.py
 airtrain/integrations/openai/chinese_assistant.py
 airtrain/integrations/openai/credentials.py
 airtrain/integrations/openai/skills.py
+airtrain/integrations/sambanova/__init__.py
 airtrain/integrations/sambanova/credentials.py
+airtrain/integrations/sambanova/skills.py
+airtrain/integrations/together/__init__.py
 airtrain/integrations/together/credentials.py
+airtrain/integrations/together/skills.py
 examples/credentials_usage.py
 examples/schema_usage.py
 examples/skill_usage.py
examples/creating-skills/chinese_anthropic_assistant.py
@@ -36,6 +36,18 @@ class ChineseAnthropicSkill(AnthropicChatSkill):
     output_schema = AnthropicOutput
 
     def process(self, input_data: T) -> AnthropicOutput:
+        """
+        Process the input and ensure Chinese language response.
+
+        Args:
+            input_data: The input data containing user's query and settings
+
+        Returns:
+            AnthropicOutput: The model's response in Chinese
+
+        Raises:
+            ProcessingError: If processing fails
+        """
         if "你是" not in input_data.system_prompt:
             input_data.system_prompt = (
                 "你是一个中文助手。" + input_data.system_prompt + "请用中文回答。"