daita-agents 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of daita-agents might be problematic.
- daita/__init__.py +208 -0
- daita/agents/__init__.py +33 -0
- daita/agents/base.py +722 -0
- daita/agents/substrate.py +895 -0
- daita/cli/__init__.py +145 -0
- daita/cli/__main__.py +7 -0
- daita/cli/ascii_art.py +44 -0
- daita/cli/core/__init__.py +0 -0
- daita/cli/core/create.py +254 -0
- daita/cli/core/deploy.py +473 -0
- daita/cli/core/deployments.py +309 -0
- daita/cli/core/import_detector.py +219 -0
- daita/cli/core/init.py +382 -0
- daita/cli/core/logs.py +239 -0
- daita/cli/core/managed_deploy.py +709 -0
- daita/cli/core/run.py +648 -0
- daita/cli/core/status.py +421 -0
- daita/cli/core/test.py +239 -0
- daita/cli/core/webhooks.py +172 -0
- daita/cli/main.py +588 -0
- daita/cli/utils.py +541 -0
- daita/config/__init__.py +62 -0
- daita/config/base.py +159 -0
- daita/config/settings.py +184 -0
- daita/core/__init__.py +262 -0
- daita/core/decision_tracing.py +701 -0
- daita/core/exceptions.py +480 -0
- daita/core/focus.py +251 -0
- daita/core/interfaces.py +76 -0
- daita/core/plugin_tracing.py +550 -0
- daita/core/relay.py +695 -0
- daita/core/reliability.py +381 -0
- daita/core/scaling.py +444 -0
- daita/core/tools.py +402 -0
- daita/core/tracing.py +770 -0
- daita/core/workflow.py +1084 -0
- daita/display/__init__.py +1 -0
- daita/display/console.py +160 -0
- daita/execution/__init__.py +58 -0
- daita/execution/client.py +856 -0
- daita/execution/exceptions.py +92 -0
- daita/execution/models.py +317 -0
- daita/llm/__init__.py +60 -0
- daita/llm/anthropic.py +166 -0
- daita/llm/base.py +373 -0
- daita/llm/factory.py +101 -0
- daita/llm/gemini.py +152 -0
- daita/llm/grok.py +114 -0
- daita/llm/mock.py +135 -0
- daita/llm/openai.py +109 -0
- daita/plugins/__init__.py +141 -0
- daita/plugins/base.py +37 -0
- daita/plugins/base_db.py +167 -0
- daita/plugins/elasticsearch.py +844 -0
- daita/plugins/mcp.py +481 -0
- daita/plugins/mongodb.py +510 -0
- daita/plugins/mysql.py +351 -0
- daita/plugins/postgresql.py +331 -0
- daita/plugins/redis_messaging.py +500 -0
- daita/plugins/rest.py +529 -0
- daita/plugins/s3.py +761 -0
- daita/plugins/slack.py +729 -0
- daita/utils/__init__.py +18 -0
- daita_agents-0.1.0.dist-info/METADATA +350 -0
- daita_agents-0.1.0.dist-info/RECORD +69 -0
- daita_agents-0.1.0.dist-info/WHEEL +5 -0
- daita_agents-0.1.0.dist-info/entry_points.txt +2 -0
- daita_agents-0.1.0.dist-info/licenses/LICENSE +56 -0
- daita_agents-0.1.0.dist-info/top_level.txt +1 -0
daita/llm/grok.py
ADDED
@@ -0,0 +1,114 @@
"""
Grok (xAI) LLM provider implementation with integrated tracing.
"""
import os
import logging
from typing import Dict, Any, Optional

from ..core.exceptions import LLMError
from .base import BaseLLMProvider

logger = logging.getLogger(__name__)

class GrokProvider(BaseLLMProvider):
    """Grok (xAI) LLM provider implementation with automatic call tracing."""

    def __init__(
        self,
        model: str = "grok-beta",
        api_key: Optional[str] = None,
        **kwargs
    ):
        """
        Initialize Grok provider.

        Args:
            model: Grok model name (e.g., "grok-beta", "grok-vision-beta")
            api_key: xAI API key
            **kwargs: Additional Grok-specific parameters
        """
        # Get API key from parameter or environment
        api_key = api_key or os.getenv("XAI_API_KEY") or os.getenv("GROK_API_KEY")

        super().__init__(model=model, api_key=api_key, **kwargs)

        # Grok-specific default parameters
        self.default_params.update({
            'stream': kwargs.get('stream', False),
            'timeout': kwargs.get('timeout', 60)
        })

        # Base URL for xAI API
        self.base_url = kwargs.get('base_url', 'https://api.x.ai/v1')

        # Lazy-load OpenAI client (Grok uses OpenAI-compatible API)
        self._client = None

    @property
    def client(self):
        """Lazy-load OpenAI client configured for xAI."""
        if self._client is None:
            try:
                import openai
                self._validate_api_key()
                self._client = openai.AsyncOpenAI(
                    api_key=self.api_key,
                    base_url=self.base_url
                )
                logger.debug("Grok client initialized")
            except ImportError:
                raise LLMError(
                    "OpenAI package not installed. Install with: pip install openai"
                )
        return self._client

    async def _generate_impl(self, prompt: str, **kwargs) -> str:
        """
        Provider-specific implementation of text generation for Grok.

        This method contains the actual Grok API call logic and is automatically
        wrapped with tracing by the base class generate() method.

        Args:
            prompt: Input prompt
            **kwargs: Optional parameters

        Returns:
            Generated text response
        """
        try:
            # Merge parameters
            params = self._merge_params(kwargs)

            # Make API call using OpenAI-compatible interface
            response = await self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "user", "content": prompt}
                ],
                max_tokens=params.get('max_tokens'),
                temperature=params.get('temperature'),
                top_p=params.get('top_p'),
                stream=params.get('stream'),
                timeout=params.get('timeout')
            )

            # Store usage for base class token extraction
            self._last_usage = response.usage

            return response.choices[0].message.content

        except Exception as e:
            logger.error(f"Grok generation failed: {str(e)}")
            raise LLMError(f"Grok generation failed: {str(e)}")

    @property
    def info(self) -> Dict[str, Any]:
        """Get information about the Grok provider."""
        base_info = super().info
        base_info.update({
            'base_url': self.base_url,
            'provider_name': 'Grok (xAI)',
            'api_compatible': 'OpenAI'
        })
        return base_info
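Since the provider resolves its key from XAI_API_KEY (or GROK_API_KEY) and funnels per-call parameters through `_merge_params()`, a call site stays small. A minimal usage sketch, assuming the traced `generate()` entry point described in the docstrings above returns the generated text, and that a key is present in the environment:

```python
import asyncio

from daita.llm.grok import GrokProvider

async def main():
    # Key is picked up from XAI_API_KEY / GROK_API_KEY if not passed
    provider = GrokProvider(model="grok-beta")

    # Per-call kwargs are merged over default_params by _merge_params()
    reply = await provider.generate(
        "Summarize this deploy log in one line.",
        max_tokens=128,
        temperature=0.2,
    )
    print(reply)
    print(provider.info)  # includes base_url, provider_name, api_compatible

asyncio.run(main())
```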
daita/llm/mock.py
ADDED
@@ -0,0 +1,135 @@
"""
Mock LLM provider for testing with integrated tracing.
"""
import asyncio
import logging
from typing import Dict, Any, Optional

from .base import BaseLLMProvider

logger = logging.getLogger(__name__)

class MockLLMProvider(BaseLLMProvider):
    """Mock LLM provider for testing purposes with automatic call tracing."""

    def __init__(
        self,
        model: str = "mock-model",
        responses: Optional[Dict[str, str]] = None,
        delay: float = 0.1,
        **kwargs
    ):
        """
        Initialize mock provider.

        Args:
            model: Mock model name
            responses: Dictionary mapping prompts to responses
            delay: Artificial delay to simulate API calls
            **kwargs: Additional parameters
        """
        # Remove api_key from kwargs to avoid conflict, then pass it explicitly
        kwargs.pop('api_key', None)  # Remove if exists
        super().__init__(model=model, api_key="mock-key", **kwargs)

        # Predefined responses
        self.responses = responses or {}
        self.delay = delay

        # Default responses
        self.default_responses = {
            "default": "This is a mock response from the LLM.",
            "analyze": "Based on the data provided, here are the key insights: [mock analysis]",
            "summarize": "Summary: [mock summary of the content]",
            "error": "This is an error response for testing."
        }

        # Track calls for testing
        self.call_history = []

    async def _generate_impl(self, prompt: str, **kwargs) -> str:
        """
        Provider-specific implementation of mock text generation.

        This method contains the mock generation logic and is automatically
        wrapped with tracing by the base class generate() method.

        Args:
            prompt: Input prompt
            **kwargs: Optional parameters

        Returns:
            Mock response
        """
        # Record the call
        self.call_history.append({
            'prompt': prompt,
            'params': kwargs,
            'timestamp': asyncio.get_event_loop().time()
        })

        # Simulate API delay
        if self.delay > 0:
            await asyncio.sleep(self.delay)

        # Check for specific response
        if prompt in self.responses:
            return self.responses[prompt]

        # Check for keyword-based responses
        prompt_lower = prompt.lower()
        for keyword, response in self.default_responses.items():
            if keyword in prompt_lower:
                return response

        # Default response
        return f"Mock response for: {prompt[:50]}..."

    def _get_last_token_usage(self) -> Dict[str, int]:
        """
        Override base class method to return mock token usage.

        Provides realistic but fake token counts for testing.
        """
        if self.call_history:
            # Get the last call to estimate tokens
            last_call = self.call_history[-1]
            prompt = last_call.get('prompt', '')

            # Mock realistic token counts
            estimated_prompt_tokens = max(5, len(prompt) // 4)  # Rough estimate
            # Assume a moderate response length for mocking
            estimated_completion_tokens = max(10, estimated_prompt_tokens // 2)

            return {
                'total_tokens': estimated_prompt_tokens + estimated_completion_tokens,
                'prompt_tokens': estimated_prompt_tokens,
                'completion_tokens': estimated_completion_tokens
            }

        # Fallback to default
        return super()._get_last_token_usage()

    def set_response(self, prompt: str, response: str) -> None:
        """Set a specific response for a prompt."""
        self.responses[prompt] = response

    def clear_history(self) -> None:
        """Clear call history."""
        self.call_history.clear()

    def get_last_call(self) -> Optional[Dict[str, Any]]:
        """Get the last call made to the provider."""
        return self.call_history[-1] if self.call_history else None

    @property
    def info(self) -> Dict[str, Any]:
        """Get information about the mock provider."""
        base_info = super().info
        base_info.update({
            'provider_name': 'Mock LLM (Testing)',
            'call_count': len(self.call_history),
            'configured_responses': len(self.responses),
            'delay_seconds': self.delay
        })
        return base_info
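Because the mock records every call and supports both exact-prompt and keyword-matched responses, it slots directly into tests. A short sketch using only methods visible in this diff, assuming `generate()` (the traced base-class wrapper around `_generate_impl()`) returns the mock text as the docstrings describe:

```python
import asyncio

from daita.llm.mock import MockLLMProvider

async def test_keyword_routing():
    llm = MockLLMProvider(delay=0)  # delay=0 skips the simulated API sleep
    llm.set_response("ping", "pong")

    # Exact-prompt match takes priority
    assert await llm.generate("ping") == "pong"

    # Keyword-based fallback: "analyze" appears in the prompt
    out = await llm.generate("Please analyze these numbers")
    assert out.startswith("Based on the data")

    # Every call is recorded for inspection
    assert len(llm.call_history) == 2
    assert llm.get_last_call()['prompt'].startswith("Please analyze")

asyncio.run(test_keyword_routing())
```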
daita/llm/openai.py
ADDED
@@ -0,0 +1,109 @@
"""
OpenAI LLM provider implementation with integrated tracing.
"""
import os
import logging
from typing import Dict, Any, Optional

from ..core.exceptions import LLMError
from .base import BaseLLMProvider

logger = logging.getLogger(__name__)

class OpenAIProvider(BaseLLMProvider):
    """OpenAI LLM provider implementation with automatic call tracing."""

    def __init__(
        self,
        model: str = "gpt-4",
        api_key: Optional[str] = None,
        **kwargs
    ):
        """
        Initialize OpenAI provider.

        Args:
            model: OpenAI model name (e.g., "gpt-4", "gpt-3.5-turbo")
            api_key: OpenAI API key
            **kwargs: Additional OpenAI-specific parameters
        """
        # Get API key from parameter or environment
        api_key = api_key or os.getenv("OPENAI_API_KEY")

        super().__init__(model=model, api_key=api_key, **kwargs)

        # OpenAI-specific default parameters
        self.default_params.update({
            'frequency_penalty': kwargs.get('frequency_penalty', 0.0),
            'presence_penalty': kwargs.get('presence_penalty', 0.0),
            'timeout': kwargs.get('timeout', 60)
        })

        # Lazy-load OpenAI client
        self._client = None

    @property
    def client(self):
        """Lazy-load OpenAI client."""
        if self._client is None:
            try:
                import openai
                self._validate_api_key()
                self._client = openai.AsyncOpenAI(api_key=self.api_key)
                logger.debug("OpenAI client initialized")
            except ImportError:
                raise LLMError(
                    "OpenAI package not installed. Install with: pip install openai"
                )
        return self._client

    async def _generate_impl(self, prompt: str, **kwargs) -> str:
        """
        Provider-specific implementation of text generation for OpenAI.

        This method contains the actual OpenAI API call logic and is automatically
        wrapped with tracing by the base class generate() method.

        Args:
            prompt: Input prompt
            **kwargs: Optional parameters

        Returns:
            Generated text response
        """
        try:
            # Merge parameters
            params = self._merge_params(kwargs)

            # Make API call
            response = await self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "user", "content": prompt}
                ],
                max_tokens=params.get('max_tokens'),
                temperature=params.get('temperature'),
                top_p=params.get('top_p'),
                frequency_penalty=params.get('frequency_penalty'),
                presence_penalty=params.get('presence_penalty'),
                timeout=params.get('timeout')
            )

            # Store usage for base class token extraction
            self._last_usage = response.usage

            return response.choices[0].message.content

        except Exception as e:
            logger.error(f"OpenAI generation failed: {str(e)}")
            raise LLMError(f"OpenAI generation failed: {str(e)}")

    @property
    def info(self) -> Dict[str, Any]:
        """Get information about the OpenAI provider."""
        base_info = super().info
        base_info.update({
            'provider_name': 'OpenAI',
            'api_compatible': 'OpenAI'
        })
        return base_info
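The OpenAI provider differs from the Grok one mainly in its defaults: no custom base_url, plus frequency_penalty and presence_penalty seeded from the constructor kwargs. A minimal sketch, assuming OPENAI_API_KEY is set and that the base class tolerates extra keyword arguments, as the pass-through here suggests:

```python
import asyncio

from daita.llm.openai import OpenAIProvider

async def main():
    # frequency_penalty is picked up into default_params by __init__
    provider = OpenAIProvider(model="gpt-4", frequency_penalty=0.5)

    text = await provider.generate(
        "Write a haiku about tracing.",
        max_tokens=64,
        temperature=0.7,
    )
    print(text)

asyncio.run(main())
```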
daita/plugins/__init__.py
ADDED

@@ -0,0 +1,141 @@
"""
Plugin system for Daita Agents.

This module provides database, API, cloud storage, search, collaboration, and MCP integrations:
- PostgreSQL plugin for async database operations
- MySQL plugin for async database operations
- MongoDB plugin for async document database operations
- REST API plugin for HTTP client functionality
- AWS S3 plugin for cloud object storage operations
- Slack plugin for team collaboration and notifications
- Elasticsearch plugin for search and analytics
- MCP plugin for Model Context Protocol server integration

All plugins follow async patterns and provide simple, clean interfaces
without over-engineering.

Usage:
    ```python
    from daita.plugins import postgresql, mysql, mongodb, rest, s3, slack, elasticsearch, mcp
    from daita import SubstrateAgent

    # Database plugins
    async with postgresql(host="localhost", database="mydb") as db:
        results = await db.query("SELECT * FROM users")

    # REST API plugin
    async with rest(base_url="https://api.example.com") as api:
        data = await api.get("/users")

    # S3 plugin
    async with s3(bucket="my-bucket", region="us-west-2") as storage:
        data = await storage.get_object("data/file.csv", format="pandas")

    # Slack plugin
    async with slack(token="xoxb-token") as slack_client:
        await slack_client.send_agent_summary("#alerts", agent_results)

    # Elasticsearch plugin
    async with elasticsearch(hosts=["localhost:9200"]) as es:
        results = await es.search("logs", {"match": {"level": "ERROR"}}, focus=["timestamp", "message"])

    # MCP plugin with agent integration
    agent = SubstrateAgent(
        name="file_analyzer",
        mcp=mcp.server(command="uvx", args=["mcp-server-filesystem", "/data"])
    )
    result = await agent.process("Read report.csv and calculate totals")
    ```
"""

# Database plugins
from .postgresql import PostgreSQLPlugin, postgresql
from .mysql import MySQLPlugin, mysql
from .mongodb import MongoDBPlugin, mongodb

# API plugins
from .rest import RESTPlugin, rest

# Cloud storage plugins
from .s3 import S3Plugin, s3

# Collaboration plugins
from .slack import SlackPlugin, slack

# Search and analytics plugins
from .elasticsearch import ElasticsearchPlugin, elasticsearch

# Messaging plugins
from .redis_messaging import RedisMessagingPlugin, redis_messaging

# MCP plugin
from . import mcp

# Simple plugin access class for SDK
class PluginAccess:
    """
    Simple plugin access for the SDK.

    Provides clean interface: sdk.plugins.postgresql(...)
    """

    def postgresql(self, **kwargs) -> PostgreSQLPlugin:
        """Create PostgreSQL plugin."""
        return postgresql(**kwargs)

    def mysql(self, **kwargs) -> MySQLPlugin:
        """Create MySQL plugin."""
        return mysql(**kwargs)

    def mongodb(self, **kwargs) -> MongoDBPlugin:
        """Create MongoDB plugin."""
        return mongodb(**kwargs)

    def rest(self, **kwargs) -> RESTPlugin:
        """Create REST API plugin."""
        return rest(**kwargs)

    def s3(self, **kwargs) -> S3Plugin:
        """Create S3 plugin."""
        return s3(**kwargs)

    def slack(self, **kwargs) -> SlackPlugin:
        """Create Slack plugin."""
        return slack(**kwargs)

    def elasticsearch(self, **kwargs) -> ElasticsearchPlugin:
        """Create Elasticsearch plugin."""
        return elasticsearch(**kwargs)

    def redis_messaging(self, **kwargs) -> RedisMessagingPlugin:
        """Create Redis messaging plugin."""
        return redis_messaging(**kwargs)

# Export everything needed
__all__ = [
    # Plugin classes
    'PostgreSQLPlugin',
    'MySQLPlugin',
    'MongoDBPlugin',
    'RESTPlugin',
    'S3Plugin',
    'SlackPlugin',
    'ElasticsearchPlugin',
    'RedisMessagingPlugin',

    # Factory functions
    'postgresql',
    'mysql',
    'mongodb',
    'rest',
    's3',
    'slack',
    'elasticsearch',
    'redis_messaging',

    # MCP module
    'mcp',

    # SDK access class
    'PluginAccess',
]
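PluginAccess adds nothing beyond forwarding to the factory functions; it exists to give the SDK a uniform surface. A small sketch of that forwarding; the `sdk.plugins` attachment point mentioned in the class docstring is not shown in this diff, so the class is constructed directly here:

```python
from daita.plugins import PluginAccess

plugins = PluginAccess()

# Each method just forwards **kwargs to the matching factory function
db = plugins.postgresql(host="localhost", database="mydb")  # PostgreSQLPlugin
api = plugins.rest(base_url="https://api.example.com")      # RESTPlugin
print(type(db).__name__, type(api).__name__)
```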
daita/plugins/base.py
ADDED
@@ -0,0 +1,37 @@
"""
Base classes for Daita plugins.

Plugins are infrastructure utilities (databases, APIs, storage) that can
optionally expose their capabilities as agent tools.
"""

from abc import ABC
from typing import List, TYPE_CHECKING

if TYPE_CHECKING:
    from ..core.tools import AgentTool


class BasePlugin(ABC):
    """
    Base class for all Daita plugins.

    Plugins provide infrastructure utilities (S3, Slack, REST APIs, etc).
    They can optionally expose their capabilities as agent tools via get_tools().
    """

    def get_tools(self) -> List['AgentTool']:
        """
        Get agent-usable tools from this plugin.

        Override in subclasses to expose plugin capabilities as LLM tools.

        Returns:
            List of AgentTool instances
        """
        return []

    @property
    def has_tools(self) -> bool:
        """Check if plugin exposes any tools"""
        return len(self.get_tools()) > 0
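BasePlugin declares no abstract methods, so the tools contract is purely opt-in: `get_tools()` defaults to an empty list and `has_tools` reflects that. A tiny sketch of the default behavior; AgentTool itself lives in `daita.core.tools` and is not shown in this diff, so tool construction is deliberately left out:

```python
from daita.plugins.base import BasePlugin

class NullPlugin(BasePlugin):
    """Hypothetical plugin that exposes no tools."""
    # get_tools() is inherited and returns [], so has_tools is False

plugin = NullPlugin()
assert plugin.get_tools() == []
assert plugin.has_tools is False
```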
daita/plugins/base_db.py
ADDED
@@ -0,0 +1,167 @@
"""
Base class for database plugins.

Provides common connection management, error handling, and context manager
support for all database plugins in the Daita framework.
"""
import logging
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, TYPE_CHECKING
from ..core.exceptions import PluginError, ConnectionError as DaitaConnectionError, ValidationError

if TYPE_CHECKING:
    from ..core.tools import AgentTool

logger = logging.getLogger(__name__)

class BaseDatabasePlugin(ABC):
    """
    Base class for all database plugins with common connection management.

    This class provides:
    - Standardized connection/disconnection lifecycle
    - Context manager support for automatic cleanup
    - Common error handling patterns
    - Consistent configuration patterns

    Database-specific plugins should inherit from this class and implement
    the abstract methods for their specific database requirements.
    """

    def __init__(self, **kwargs):
        """
        Initialize base database plugin.

        Args:
            **kwargs: Database-specific configuration parameters
        """
        # Common connection state
        self._connection = None
        self._pool = None
        self._client = None
        self._db = None

        # Connection configuration
        self.config = kwargs
        self.timeout = kwargs.get('timeout', 30)
        self.max_retries = kwargs.get('max_retries', 3)

        logger.debug(f"{self.__class__.__name__} initialized with config keys: {list(kwargs.keys())}")

    @abstractmethod
    async def connect(self) -> None:
        """
        Connect to the database.

        This method must be implemented by each database plugin to handle
        the specific connection logic for that database type.
        """
        pass

    @abstractmethod
    async def disconnect(self) -> None:
        """
        Disconnect from the database and clean up resources.

        This method must be implemented by each database plugin to handle
        the specific disconnection and cleanup logic for that database type.
        """
        pass

    @property
    def is_connected(self) -> bool:
        """
        Check if the database connection is active.

        Returns:
            True if connected, False otherwise
        """
        return (
            self._connection is not None or
            self._pool is not None or
            self._client is not None
        )

    async def __aenter__(self):
        """Async context manager entry - automatically connect."""
        await self.connect()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit - automatically disconnect."""
        await self.disconnect()

    def _validate_connection(self) -> None:
        """
        Validate that the database connection is available.

        Raises:
            ValidationError: If not connected to database
        """
        if not self.is_connected:
            raise ValidationError(
                f"{self.__class__.__name__} is not connected to database",
                field="connection_state"
            )

    def _handle_connection_error(self, error: Exception, operation: str) -> None:
        """
        Handle database connection errors with consistent logging.

        Args:
            error: The exception that occurred
            operation: Description of the operation that failed

        Raises:
            PluginError: Wrapped database error with context
        """
        error_msg = f"{self.__class__.__name__} {operation} failed: {str(error)}"
        logger.error(error_msg)

        # Choose appropriate exception type based on the original error
        if isinstance(error, ImportError):
            # Missing dependency - permanent error
            raise PluginError(
                error_msg,
                plugin_name=self.__class__.__name__,
                retry_hint="permanent",
                context={"operation": operation, "original_error": str(error)}
            ) from error
        else:
            # Connection issues - typically transient
            raise DaitaConnectionError(
                error_msg,
                context={"plugin": self.__class__.__name__, "operation": operation}
            ) from error

    def get_tools(self) -> List['AgentTool']:
        """
        Get agent-usable tools from this database plugin.

        Override in subclasses to expose database operations as LLM tools.

        Returns:
            List of AgentTool instances
        """
        return []

    @property
    def has_tools(self) -> bool:
        """Check if plugin exposes any tools"""
        return len(self.get_tools()) > 0

    @property
    def info(self) -> Dict[str, Any]:
        """
        Get information about the database plugin.

        Returns:
            Dictionary with plugin information
        """
        return {
            'plugin_type': self.__class__.__name__,
            'connected': self.is_connected,
            'timeout': self.timeout,
            'max_retries': self.max_retries,
            'config_keys': list(self.config.keys())
        }
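The base class leaves only connect() and disconnect() abstract; is_connected, the context-manager hooks, and the error helpers come for free. A minimal sketch of a concrete subclass, using aiosqlite purely for illustration (SQLite is not one of the shipped plugins, which cover PostgreSQL, MySQL, and MongoDB):

```python
import asyncio

import aiosqlite

from daita.plugins.base_db import BaseDatabasePlugin

class SQLitePlugin(BaseDatabasePlugin):
    """Hypothetical SQLite plugin; not part of the package."""

    async def connect(self) -> None:
        try:
            # self.config holds the **kwargs passed to __init__
            self._connection = await aiosqlite.connect(
                self.config.get('database', ':memory:')
            )
        except Exception as e:
            # Wraps as PluginError (permanent) or ConnectionError (transient)
            self._handle_connection_error(e, "connect")

    async def disconnect(self) -> None:
        if self._connection is not None:
            await self._connection.close()
            self._connection = None

    async def query(self, sql: str) -> list:
        self._validate_connection()  # raises ValidationError if never connected
        async with self._connection.execute(sql) as cursor:
            return await cursor.fetchall()

async def main():
    # __aenter__/__aexit__ from the base class handle connect/disconnect
    async with SQLitePlugin(database=":memory:") as db:
        print(await db.query("SELECT 1"))

asyncio.run(main())
```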