daita-agents 0.2.0__py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
- daita/__init__.py +216 -0
- daita/agents/__init__.py +33 -0
- daita/agents/base.py +743 -0
- daita/agents/substrate.py +1141 -0
- daita/cli/__init__.py +145 -0
- daita/cli/__main__.py +7 -0
- daita/cli/ascii_art.py +44 -0
- daita/cli/core/__init__.py +0 -0
- daita/cli/core/create.py +254 -0
- daita/cli/core/deploy.py +473 -0
- daita/cli/core/deployments.py +309 -0
- daita/cli/core/import_detector.py +219 -0
- daita/cli/core/init.py +481 -0
- daita/cli/core/logs.py +239 -0
- daita/cli/core/managed_deploy.py +709 -0
- daita/cli/core/run.py +648 -0
- daita/cli/core/status.py +421 -0
- daita/cli/core/test.py +239 -0
- daita/cli/core/webhooks.py +172 -0
- daita/cli/main.py +588 -0
- daita/cli/utils.py +541 -0
- daita/config/__init__.py +62 -0
- daita/config/base.py +159 -0
- daita/config/settings.py +184 -0
- daita/core/__init__.py +262 -0
- daita/core/decision_tracing.py +701 -0
- daita/core/exceptions.py +480 -0
- daita/core/focus.py +251 -0
- daita/core/interfaces.py +76 -0
- daita/core/plugin_tracing.py +550 -0
- daita/core/relay.py +779 -0
- daita/core/reliability.py +381 -0
- daita/core/scaling.py +459 -0
- daita/core/tools.py +554 -0
- daita/core/tracing.py +770 -0
- daita/core/workflow.py +1144 -0
- daita/display/__init__.py +1 -0
- daita/display/console.py +160 -0
- daita/execution/__init__.py +58 -0
- daita/execution/client.py +856 -0
- daita/execution/exceptions.py +92 -0
- daita/execution/models.py +317 -0
- daita/llm/__init__.py +60 -0
- daita/llm/anthropic.py +291 -0
- daita/llm/base.py +530 -0
- daita/llm/factory.py +101 -0
- daita/llm/gemini.py +355 -0
- daita/llm/grok.py +219 -0
- daita/llm/mock.py +172 -0
- daita/llm/openai.py +220 -0
- daita/plugins/__init__.py +141 -0
- daita/plugins/base.py +37 -0
- daita/plugins/base_db.py +167 -0
- daita/plugins/elasticsearch.py +849 -0
- daita/plugins/mcp.py +481 -0
- daita/plugins/mongodb.py +520 -0
- daita/plugins/mysql.py +362 -0
- daita/plugins/postgresql.py +342 -0
- daita/plugins/redis_messaging.py +500 -0
- daita/plugins/rest.py +537 -0
- daita/plugins/s3.py +770 -0
- daita/plugins/slack.py +729 -0
- daita/utils/__init__.py +18 -0
- daita_agents-0.2.0.dist-info/METADATA +409 -0
- daita_agents-0.2.0.dist-info/RECORD +69 -0
- daita_agents-0.2.0.dist-info/WHEEL +5 -0
- daita_agents-0.2.0.dist-info/entry_points.txt +2 -0
- daita_agents-0.2.0.dist-info/licenses/LICENSE +56 -0
- daita_agents-0.2.0.dist-info/top_level.txt +1 -0
daita/llm/mock.py
ADDED
@@ -0,0 +1,172 @@
"""
Mock LLM provider for testing with integrated tracing.
"""
import asyncio
import logging
from typing import Dict, Any, Optional

from .base import BaseLLMProvider

logger = logging.getLogger(__name__)

class MockLLMProvider(BaseLLMProvider):
    """Mock LLM provider for testing purposes with automatic call tracing."""

    def __init__(
        self,
        model: str = "mock-model",
        responses: Optional[Dict[str, str]] = None,
        delay: float = 0.1,
        **kwargs
    ):
        """
        Initialize mock provider.

        Args:
            model: Mock model name
            responses: Dictionary mapping prompts to responses
            delay: Artificial delay to simulate API calls
            **kwargs: Additional parameters
        """
        # Remove api_key from kwargs to avoid conflict, then pass it explicitly
        kwargs.pop('api_key', None)  # Remove if exists
        super().__init__(model=model, api_key="mock-key", **kwargs)

        # Predefined responses
        self.responses = responses or {}
        self.delay = delay

        # Default responses
        self.default_responses = {
            "default": "This is a mock response from the LLM.",
            "analyze": "Based on the data provided, here are the key insights: [mock analysis]",
            "summarize": "Summary: [mock summary of the content]",
            "error": "This is an error response for testing."
        }

        # Track calls for testing
        self.call_history = []

    async def _generate_impl(self, prompt: str, **kwargs) -> str:
        """
        Provider-specific implementation of mock text generation.

        This method contains the mock generation logic and is automatically
        wrapped with tracing by the base class generate() method.

        Args:
            prompt: Input prompt
            **kwargs: Optional parameters

        Returns:
            Mock response
        """
        # Record the call
        self.call_history.append({
            'prompt': prompt,
            'params': kwargs,
            'timestamp': asyncio.get_event_loop().time()
        })

        # Simulate API delay
        if self.delay > 0:
            await asyncio.sleep(self.delay)

        # Check for specific response
        if prompt in self.responses:
            return self.responses[prompt]

        # Check for keyword-based responses
        prompt_lower = prompt.lower()
        for keyword, response in self.default_responses.items():
            if keyword in prompt_lower:
                return response

        # Default response
        return f"Mock response for: {prompt[:50]}..."

    async def _generate_with_tools_single(
        self,
        messages: list,
        tools: list,
        **kwargs
    ) -> dict:
        """
        Mock implementation of single LLM call with tools.

        Returns a mock response without tool calls (final answer).
        Override this in tests if you need to test tool calling behavior.
        """
        # Extract the last user message
        user_message = ""
        for msg in reversed(messages):
            if msg.get("role") == "user":
                user_message = msg.get("content", "")
                break

        # Record the call
        self.call_history.append({
            'messages': messages,
            'tools': tools,
            'params': kwargs,
            'timestamp': asyncio.get_event_loop().time()
        })

        # Simulate API delay
        if self.delay > 0:
            await asyncio.sleep(self.delay)

        # Return final answer (no tool calls for basic mock)
        return {
            "content": f"Mock response for: {user_message[:50]}...",
            "tool_calls": None
        }

    def _get_last_token_usage(self) -> Dict[str, int]:
        """
        Override base class method to return mock token usage.

        Provides realistic but fake token counts for testing.
        """
        if self.call_history:
            # Get the last call to estimate tokens
            last_call = self.call_history[-1]
            prompt = last_call.get('prompt', '')

            # Mock realistic token counts
            estimated_prompt_tokens = max(5, len(prompt) // 4)  # Rough estimate
            # Assume a moderate response length for mocking
            estimated_completion_tokens = max(10, estimated_prompt_tokens // 2)

            return {
                'total_tokens': estimated_prompt_tokens + estimated_completion_tokens,
                'prompt_tokens': estimated_prompt_tokens,
                'completion_tokens': estimated_completion_tokens
            }

        # Fallback to default
        return super()._get_last_token_usage()

    def set_response(self, prompt: str, response: str) -> None:
        """Set a specific response for a prompt."""
        self.responses[prompt] = response

    def clear_history(self) -> None:
        """Clear call history."""
        self.call_history.clear()

    def get_last_call(self) -> Optional[Dict[str, Any]]:
        """Get the last call made to the provider."""
        return self.call_history[-1] if self.call_history else None

    @property
    def info(self) -> Dict[str, Any]:
        """Get information about the mock provider."""
        base_info = super().info
        base_info.update({
            'provider_name': 'Mock LLM (Testing)',
            'call_count': len(self.call_history),
            'configured_responses': len(self.responses),
            'delay_seconds': self.delay
        })
        return base_info
daita/llm/openai.py
ADDED
@@ -0,0 +1,220 @@
"""
OpenAI LLM provider implementation with integrated tracing.
"""
import os
import logging
from typing import Dict, Any, Optional

from ..core.exceptions import LLMError
from .base import BaseLLMProvider

logger = logging.getLogger(__name__)

class OpenAIProvider(BaseLLMProvider):
    """OpenAI LLM provider implementation with automatic call tracing."""

    def __init__(
        self,
        model: str = "gpt-4",
        api_key: Optional[str] = None,
        **kwargs
    ):
        """
        Initialize OpenAI provider.

        Args:
            model: OpenAI model name (e.g., "gpt-4", "gpt-3.5-turbo")
            api_key: OpenAI API key
            **kwargs: Additional OpenAI-specific parameters
        """
        # Get API key from parameter or environment
        api_key = api_key or os.getenv("OPENAI_API_KEY")

        super().__init__(model=model, api_key=api_key, **kwargs)

        # OpenAI-specific default parameters
        self.default_params.update({
            'frequency_penalty': kwargs.get('frequency_penalty', 0.0),
            'presence_penalty': kwargs.get('presence_penalty', 0.0),
            'timeout': kwargs.get('timeout', 60)
        })

        # Lazy-load OpenAI client
        self._client = None

    @property
    def client(self):
        """Lazy-load OpenAI client."""
        if self._client is None:
            try:
                import openai
                self._validate_api_key()
                self._client = openai.AsyncOpenAI(api_key=self.api_key)
                logger.debug("OpenAI client initialized")
            except ImportError:
                raise LLMError(
                    "OpenAI package not installed. Install with: pip install openai"
                )
        return self._client

    async def _generate_impl(self, prompt: str, **kwargs) -> str:
        """
        Provider-specific implementation of text generation for OpenAI.

        This method contains the actual OpenAI API call logic and is automatically
        wrapped with tracing by the base class generate() method.

        Args:
            prompt: Input prompt
            **kwargs: Optional parameters

        Returns:
            Generated text response
        """
        try:
            # Merge parameters
            params = self._merge_params(kwargs)

            # Make API call
            response = await self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "user", "content": prompt}
                ],
                max_tokens=params.get('max_tokens'),
                temperature=params.get('temperature'),
                top_p=params.get('top_p'),
                frequency_penalty=params.get('frequency_penalty'),
                presence_penalty=params.get('presence_penalty'),
                timeout=params.get('timeout')
            )

            # Store usage for base class token extraction
            self._last_usage = response.usage

            return response.choices[0].message.content

        except Exception as e:
            logger.error(f"OpenAI generation failed: {str(e)}")
            raise LLMError(f"OpenAI generation failed: {str(e)}")

    def _convert_messages_to_openai(
        self,
        messages: list[Dict[str, Any]]
    ) -> list[Dict[str, Any]]:
        """
        Convert universal flat format to OpenAI's nested format.

        OpenAI expects tool_calls in nested format:
        {"id": "x", "type": "function", "function": {"name": "...", "arguments": "..."}}

        Our internal format is flat:
        {"id": "x", "name": "...", "arguments": {...}}
        """
        import json

        openai_messages = []
        for msg in messages:
            if msg.get("role") == "assistant" and msg.get("tool_calls"):
                # Convert flat format to OpenAI's nested format
                converted_tool_calls = []
                for tc in msg["tool_calls"]:
                    converted_tool_calls.append({
                        "id": tc.get("id", ""),
                        "type": "function",
                        "function": {
                            "name": tc["name"],
                            "arguments": json.dumps(tc["arguments"]) if isinstance(tc["arguments"], dict) else tc["arguments"]
                        }
                    })

                openai_messages.append({
                    "role": "assistant",
                    "tool_calls": converted_tool_calls
                })
            else:
                # Pass through other messages unchanged
                openai_messages.append(msg)

        return openai_messages

    async def _generate_with_tools_single(
        self,
        messages: list[Dict[str, Any]],
        tools: list[Dict[str, Any]],
        **kwargs
    ) -> Dict[str, Any]:
        """
        OpenAI-specific tool calling implementation.

        Args:
            messages: Conversation history in universal flat format
            tools: Tool specifications in OpenAI format
            **kwargs: Optional parameters

        Returns:
            {
                "tool_calls": [...],  # If LLM wants to call tools
                "content": "...",  # If LLM has final answer
            }
        """
        import json

        try:
            # Merge parameters
            params = self._merge_params(kwargs)

            # Convert flat format to OpenAI's nested format
            openai_messages = self._convert_messages_to_openai(messages)

            # Make API call with tools
            response = await self.client.chat.completions.create(
                model=self.model,
                messages=openai_messages,
                tools=tools,
                tool_choice="auto",
                max_tokens=params.get('max_tokens'),
                temperature=params.get('temperature'),
                top_p=params.get('top_p'),
                frequency_penalty=params.get('frequency_penalty'),
                presence_penalty=params.get('presence_penalty'),
                timeout=params.get('timeout')
            )

            message = response.choices[0].message

            # Store usage for token tracking
            if hasattr(response, 'usage'):
                self._last_usage = response.usage

            if message.tool_calls:
                # LLM wants to call tools
                return {
                    "tool_calls": [
                        {
                            "id": tc.id,
                            "name": tc.function.name,
                            "arguments": json.loads(tc.function.arguments)
                        }
                        for tc in message.tool_calls
                    ]
                }
            else:
                # LLM has final answer
                return {
                    "content": message.content
                }

        except Exception as e:
            logger.error(f"OpenAI tool calling failed: {str(e)}")
            raise LLMError(f"OpenAI tool calling failed: {str(e)}")

    @property
    def info(self) -> Dict[str, Any]:
        """Get information about the OpenAI provider."""
        base_info = super().info
        base_info.update({
            'provider_name': 'OpenAI',
            'api_compatible': 'OpenAI'
        })
        return base_info
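One subtlety worth a concrete look is the flat-to-nested tool-call conversion. The sketch below is illustrative only: it uses a placeholder key and assumes the base constructor does not validate the key eagerly (validation happens in the lazy `client` property above), so no network call is made:

```python
from daita.llm.openai import OpenAIProvider

# The AsyncOpenAI client is only built on first access of .client, so a
# placeholder key is fine for exercising the pure conversion logic.
provider = OpenAIProvider(model="gpt-4", api_key="sk-placeholder")

history = [
    {"role": "user", "content": "What is 2 + 2?"},
    {
        "role": "assistant",
        "tool_calls": [
            {"id": "call_1", "name": "calculator", "arguments": {"expr": "2 + 2"}}
        ],
    },
]

converted = provider._convert_messages_to_openai(history)
print(converted[1]["tool_calls"][0])
# The flat tool call becomes OpenAI's nested shape, with the arguments
# dict JSON-encoded:
# {'id': 'call_1', 'type': 'function',
#  'function': {'name': 'calculator', 'arguments': '{"expr": "2 + 2"}'}}
```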
daita/plugins/__init__.py
ADDED
@@ -0,0 +1,141 @@
"""
Plugin system for Daita Agents.

This module provides database, API, cloud storage, search, collaboration, and MCP integrations:
- PostgreSQL plugin for async database operations
- MySQL plugin for async database operations
- MongoDB plugin for async document database operations
- REST API plugin for HTTP client functionality
- AWS S3 plugin for cloud object storage operations
- Slack plugin for team collaboration and notifications
- Elasticsearch plugin for search and analytics
- MCP plugin for Model Context Protocol server integration

All plugins follow async patterns and provide simple, clean interfaces
without over-engineering.

Usage:
    ```python
    from daita.plugins import postgresql, mysql, mongodb, rest, s3, slack, elasticsearch, mcp
    from daita import SubstrateAgent

    # Database plugins
    async with postgresql(host="localhost", database="mydb") as db:
        results = await db.query("SELECT * FROM users")

    # REST API plugin
    async with rest(base_url="https://api.example.com") as api:
        data = await api.get("/users")

    # S3 plugin
    async with s3(bucket="my-bucket", region="us-west-2") as storage:
        data = await storage.get_object("data/file.csv", format="pandas")

    # Slack plugin
    async with slack(token="xoxb-token") as slack_client:
        await slack_client.send_agent_summary("#alerts", agent_results)

    # Elasticsearch plugin
    async with elasticsearch(hosts=["localhost:9200"]) as es:
        results = await es.search("logs", {"match": {"level": "ERROR"}}, focus=["timestamp", "message"])

    # MCP plugin with agent integration
    agent = SubstrateAgent(
        name="file_analyzer",
        mcp=mcp.server(command="uvx", args=["mcp-server-filesystem", "/data"])
    )
    result = await agent.process("Read report.csv and calculate totals")
    ```
"""

# Database plugins
from .postgresql import PostgreSQLPlugin, postgresql
from .mysql import MySQLPlugin, mysql
from .mongodb import MongoDBPlugin, mongodb

# API plugins
from .rest import RESTPlugin, rest

# Cloud storage plugins
from .s3 import S3Plugin, s3

# Collaboration plugins
from .slack import SlackPlugin, slack

# Search and analytics plugins
from .elasticsearch import ElasticsearchPlugin, elasticsearch

# Messaging plugins
from .redis_messaging import RedisMessagingPlugin, redis_messaging

# MCP plugin
from . import mcp

# Simple plugin access class for SDK
class PluginAccess:
    """
    Simple plugin access for the SDK.

    Provides clean interface: sdk.plugins.postgresql(...)
    """

    def postgresql(self, **kwargs) -> PostgreSQLPlugin:
        """Create PostgreSQL plugin."""
        return postgresql(**kwargs)

    def mysql(self, **kwargs) -> MySQLPlugin:
        """Create MySQL plugin."""
        return mysql(**kwargs)

    def mongodb(self, **kwargs) -> MongoDBPlugin:
        """Create MongoDB plugin."""
        return mongodb(**kwargs)

    def rest(self, **kwargs) -> RESTPlugin:
        """Create REST API plugin."""
        return rest(**kwargs)

    def s3(self, **kwargs) -> S3Plugin:
        """Create S3 plugin."""
        return s3(**kwargs)

    def slack(self, **kwargs) -> SlackPlugin:
        """Create Slack plugin."""
        return slack(**kwargs)

    def elasticsearch(self, **kwargs) -> ElasticsearchPlugin:
        """Create Elasticsearch plugin."""
        return elasticsearch(**kwargs)

    def redis_messaging(self, **kwargs) -> RedisMessagingPlugin:
        """Create Redis messaging plugin."""
        return redis_messaging(**kwargs)

# Export everything needed
__all__ = [
    # Plugin classes
    'PostgreSQLPlugin',
    'MySQLPlugin',
    'MongoDBPlugin',
    'RESTPlugin',
    'S3Plugin',
    'SlackPlugin',
    'ElasticsearchPlugin',
    'RedisMessagingPlugin',

    # Factory functions
    'postgresql',
    'mysql',
    'mongodb',
    'rest',
    's3',
    'slack',
    'elasticsearch',
    'redis_messaging',

    # MCP module
    'mcp',

    # SDK access class
    'PluginAccess',
]
daita/plugins/base.py
ADDED
@@ -0,0 +1,37 @@
"""
Base classes for Daita plugins.

Plugins are infrastructure utilities (databases, APIs, storage) that can
optionally expose their capabilities as agent tools.
"""

from abc import ABC
from typing import List, TYPE_CHECKING

if TYPE_CHECKING:
    from ..core.tools import AgentTool


class BasePlugin(ABC):
    """
    Base class for all Daita plugins.

    Plugins provide infrastructure utilities (S3, Slack, REST APIs, etc).
    They can optionally expose their capabilities as agent tools via get_tools().
    """

    def get_tools(self) -> List['AgentTool']:
        """
        Get agent-usable tools from this plugin.

        Override in subclasses to expose plugin capabilities as LLM tools.

        Returns:
            List of AgentTool instances
        """
        return []

    @property
    def has_tools(self) -> bool:
        """Check if plugin exposes any tools"""
        return len(self.get_tools()) > 0