smartify_ai-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. smartify/__init__.py +3 -0
  2. smartify/agents/__init__.py +0 -0
  3. smartify/agents/adapters/__init__.py +13 -0
  4. smartify/agents/adapters/anthropic.py +253 -0
  5. smartify/agents/adapters/openai.py +289 -0
  6. smartify/api/__init__.py +26 -0
  7. smartify/api/auth.py +352 -0
  8. smartify/api/errors.py +380 -0
  9. smartify/api/events.py +345 -0
  10. smartify/api/server.py +992 -0
  11. smartify/cli/__init__.py +1 -0
  12. smartify/cli/main.py +430 -0
  13. smartify/engine/__init__.py +64 -0
  14. smartify/engine/approval.py +479 -0
  15. smartify/engine/orchestrator.py +1365 -0
  16. smartify/engine/scheduler.py +380 -0
  17. smartify/engine/spark.py +294 -0
  18. smartify/guardrails/__init__.py +22 -0
  19. smartify/guardrails/breakers.py +409 -0
  20. smartify/models/__init__.py +61 -0
  21. smartify/models/grid.py +625 -0
  22. smartify/notifications/__init__.py +22 -0
  23. smartify/notifications/webhook.py +556 -0
  24. smartify/state/__init__.py +46 -0
  25. smartify/state/checkpoint.py +558 -0
  26. smartify/state/resume.py +301 -0
  27. smartify/state/store.py +370 -0
  28. smartify/tools/__init__.py +17 -0
  29. smartify/tools/base.py +196 -0
  30. smartify/tools/builtin/__init__.py +79 -0
  31. smartify/tools/builtin/file.py +464 -0
  32. smartify/tools/builtin/http.py +195 -0
  33. smartify/tools/builtin/shell.py +137 -0
  34. smartify/tools/mcp/__init__.py +33 -0
  35. smartify/tools/mcp/adapter.py +157 -0
  36. smartify/tools/mcp/client.py +334 -0
  37. smartify/tools/mcp/registry.py +130 -0
  38. smartify/validator/__init__.py +0 -0
  39. smartify/validator/validate.py +271 -0
  40. smartify/workspace/__init__.py +5 -0
  41. smartify/workspace/manager.py +248 -0
  42. smartify_ai-0.1.0.dist-info/METADATA +201 -0
  43. smartify_ai-0.1.0.dist-info/RECORD +46 -0
  44. smartify_ai-0.1.0.dist-info/WHEEL +4 -0
  45. smartify_ai-0.1.0.dist-info/entry_points.txt +2 -0
  46. smartify_ai-0.1.0.dist-info/licenses/LICENSE +21 -0
smartify/__init__.py ADDED
@@ -0,0 +1,3 @@
+ """Smartify - Local runtime for Grid specifications."""
+
+ __version__ = "0.1.0"
smartify/agents/__init__.py ADDED
File without changes
smartify/agents/adapters/__init__.py ADDED
@@ -0,0 +1,13 @@
+ """LLM and tool adapters for Smartify.
+
+ Adapters implement the LLMAdapter and ToolAdapter protocols
+ defined in smartify.engine.orchestrator.
+ """
+
+ from smartify.agents.adapters.anthropic import AnthropicAdapter
+ from smartify.agents.adapters.openai import OpenAIAdapter
+
+ __all__ = [
+     "AnthropicAdapter",
+     "OpenAIAdapter",
+ ]
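Both exports implement the same duck-typed `complete()` contract; the LLMAdapter protocol itself lives in smartify.engine.orchestrator, which is not part of this diff. As a rough sketch of what a conforming third-party adapter could look like — the class name and echo behavior below are invented for illustration, and only the method signature and result keys mirror the bundled adapters:

```python
# Hypothetical adapter conforming to the same shape as the bundled
# AnthropicAdapter/OpenAIAdapter. The actual LLMAdapter protocol is
# defined in smartify.engine.orchestrator, which this diff does not show.
from typing import Any, Dict, List, Optional


class EchoAdapter:
    """Toy adapter that echoes the last user message; for tests only."""

    async def complete(
        self,
        messages: List[Dict[str, str]],
        system: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        tools: Optional[List[Dict]] = None,
        model: Optional[str] = None,
    ) -> Dict[str, Any]:
        # Echo the most recent user message back as the "completion".
        last = next(
            (m["content"] for m in reversed(messages) if m.get("role") == "user"),
            "",
        )
        # Same result keys as the bundled adapters return.
        return {
            "content": last,
            "tool_calls": None,
            "tokens_in": 0,
            "tokens_out": 0,
            "cost": 0.0,
            "model": model or "echo",
            "stop_reason": "end_turn",
        }
```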
smartify/agents/adapters/anthropic.py ADDED
@@ -0,0 +1,253 @@
+ """Anthropic Claude LLM adapter for Smartify.
+
+ Implements the LLMAdapter protocol using the Anthropic Python SDK.
+ Supports Claude 3.5 Sonnet, Claude 3 Opus, and other Claude models.
+ """
+
+ import os
+ import logging
+ from typing import Any, Dict, List, Optional
+
+ from anthropic import AsyncAnthropic
+ from anthropic.types import Message, ContentBlock, ToolUseBlock, TextBlock
+
+
+ logger = logging.getLogger(__name__)
+
+
+ # Token pricing per million tokens (as of early 2026, check for updates)
+ MODEL_PRICING = {
+     "claude-sonnet-4-20250514": {"input": 3.00, "output": 15.00},
+     "claude-3-5-sonnet-20241022": {"input": 3.00, "output": 15.00},
+     "claude-3-opus-20240229": {"input": 15.00, "output": 75.00},
+     "claude-3-sonnet-20240229": {"input": 3.00, "output": 15.00},
+     "claude-3-haiku-20240307": {"input": 0.25, "output": 1.25},
+     # Aliases
+     "claude-3-5-sonnet-latest": {"input": 3.00, "output": 15.00},
+     "claude-3-opus-latest": {"input": 15.00, "output": 75.00},
+ }
+
+ DEFAULT_MODEL = "claude-sonnet-4-20250514"
+
+
+ class AnthropicAdapter:
+     """Anthropic Claude LLM adapter.
+
+     Implements the LLMAdapter protocol for use with Smartify's orchestrator.
+
+     Example:
+         adapter = AnthropicAdapter(api_key="sk-...")
+         response = await adapter.complete(
+             messages=[{"role": "user", "content": "Hello!"}],
+             system="You are a helpful assistant."
+         )
+     """
+
+     def __init__(
+         self,
+         api_key: Optional[str] = None,
+         model: str = DEFAULT_MODEL,
+         base_url: Optional[str] = None,
+         timeout: float = 120.0,
+     ):
+         """Initialize the Anthropic adapter.
+
+         Args:
+             api_key: Anthropic API key. Defaults to ANTHROPIC_API_KEY env var.
+             model: Default model to use for completions.
+             base_url: Optional custom API base URL.
+             timeout: Request timeout in seconds.
+         """
+         self.api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
+         if not self.api_key:
+             raise ValueError(
+                 "Anthropic API key required. Set ANTHROPIC_API_KEY or pass api_key."
+             )
+
+         self.model = model
+         self.timeout = timeout
+
+         self._client = AsyncAnthropic(
+             api_key=self.api_key,
+             base_url=base_url,
+             timeout=timeout,
+         )
+
+     async def complete(
+         self,
+         messages: List[Dict[str, str]],
+         system: Optional[str] = None,
+         temperature: float = 0.7,
+         max_tokens: Optional[int] = None,
+         tools: Optional[List[Dict]] = None,
+         model: Optional[str] = None,
+     ) -> Dict[str, Any]:
+         """Generate a completion from Claude.
+
+         Args:
+             messages: List of message dicts with 'role' and 'content' keys.
+             system: Optional system prompt.
+             temperature: Sampling temperature (0-1).
+             max_tokens: Maximum tokens to generate. Defaults to 4096.
+             tools: Optional list of tool definitions for function calling.
+             model: Override the default model for this request.
+
+         Returns:
+             Dict containing:
+                 - content: The assistant's response text
+                 - tool_calls: List of tool calls if any
+                 - tokens_in: Input token count
+                 - tokens_out: Output token count
+                 - cost: Estimated cost in USD
+                 - model: Model used
+                 - stop_reason: Why generation stopped
+         """
+         use_model = model or self.model
+         max_tokens = max_tokens or 4096
+
+         # Build request kwargs
+         kwargs: Dict[str, Any] = {
+             "model": use_model,
+             "messages": self._convert_messages(messages),
+             "max_tokens": max_tokens,
+             "temperature": temperature,
+         }
+
+         if system:
+             kwargs["system"] = system
+
+         if tools:
+             kwargs["tools"] = self._convert_tools(tools)
+
+         logger.debug(f"Anthropic request: model={use_model}, messages={len(messages)}")
+
+         response: Message = await self._client.messages.create(**kwargs)
+
+         # Extract content
+         content_text = ""
+         tool_calls = []
+
+         for block in response.content:
+             if isinstance(block, TextBlock):
+                 content_text += block.text
+             elif isinstance(block, ToolUseBlock):
+                 tool_calls.append({
+                     "id": block.id,
+                     "name": block.name,
+                     "arguments": block.input,
+                 })
+
+         # Calculate cost
+         tokens_in = response.usage.input_tokens
+         tokens_out = response.usage.output_tokens
+         cost = self._calculate_cost(use_model, tokens_in, tokens_out)
+
+         logger.debug(
+             f"Anthropic response: tokens_in={tokens_in}, tokens_out={tokens_out}, "
+             f"cost=${cost:.4f}, stop_reason={response.stop_reason}"
+         )
+
+         return {
+             "content": content_text,
+             "tool_calls": tool_calls if tool_calls else None,
+             "tokens_in": tokens_in,
+             "tokens_out": tokens_out,
+             "cost": cost,
+             "model": use_model,
+             "stop_reason": response.stop_reason,
+         }
+
+     def _convert_messages(self, messages: List[Dict[str, str]]) -> List[Dict[str, Any]]:
+         """Convert generic messages to Anthropic format.
+
+         Handles:
+             - Basic user/assistant messages
+             - Tool results
+             - Multi-part content
+         """
+         converted = []
+
+         for msg in messages:
+             role = msg.get("role", "user")
+             content = msg.get("content", "")
+
+             # Anthropic uses "user" and "assistant" roles
+             if role == "system":
+                 # System messages should be passed via system parameter
+                 continue
+
+             anthropic_msg: Dict[str, Any] = {"role": role}
+
+             # Handle tool results
+             if role == "tool" or msg.get("tool_call_id"):
+                 anthropic_msg = {
+                     "role": "user",
+                     "content": [
+                         {
+                             "type": "tool_result",
+                             "tool_use_id": msg.get("tool_call_id", ""),
+                             "content": content,
+                         }
+                     ],
+                 }
+             else:
+                 anthropic_msg["content"] = content
+
+             converted.append(anthropic_msg)
+
+         return converted
+
+     def _convert_tools(self, tools: List[Dict]) -> List[Dict[str, Any]]:
+         """Convert generic tool definitions to Anthropic format.
+
+         Expected input format (OpenAI-style):
+             {
+                 "type": "function",
+                 "function": {
+                     "name": "tool_name",
+                     "description": "What the tool does",
+                     "parameters": { JSON Schema }
+                 }
+             }
+
+         Anthropic format:
+             {
+                 "name": "tool_name",
+                 "description": "What the tool does",
+                 "input_schema": { JSON Schema }
+             }
+         """
+         converted = []
+
+         for tool in tools:
+             if tool.get("type") == "function":
+                 func = tool.get("function", {})
+                 converted.append({
+                     "name": func.get("name", ""),
+                     "description": func.get("description", ""),
+                     "input_schema": func.get("parameters", {"type": "object", "properties": {}}),
+                 })
+             elif "name" in tool:
+                 # Already in Anthropic format
+                 converted.append(tool)
+
+         return converted
+
+     def _calculate_cost(self, model: str, tokens_in: int, tokens_out: int) -> float:
+         """Calculate estimated cost in USD."""
+         pricing = MODEL_PRICING.get(model, MODEL_PRICING[DEFAULT_MODEL])
+
+         cost_in = (tokens_in / 1_000_000) * pricing["input"]
+         cost_out = (tokens_out / 1_000_000) * pricing["output"]
+
+         return cost_in + cost_out
+
+     async def close(self) -> None:
+         """Close the underlying HTTP client."""
+         await self._client.close()
+
+     async def __aenter__(self) -> "AnthropicAdapter":
+         return self
+
+     async def __aexit__(self, *args) -> None:
+         await self.close()
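For orientation, a minimal usage sketch of the AnthropicAdapter above. It assumes ANTHROPIC_API_KEY is set in the environment; the prompt text and script scaffolding are illustrative, while the constructor, `complete()` signature, and result keys come directly from the code in this diff:

```python
import asyncio

from smartify.agents.adapters import AnthropicAdapter


async def main() -> None:
    # The adapter is an async context manager, so the underlying
    # HTTP client is closed on exit.
    async with AnthropicAdapter() as adapter:
        result = await adapter.complete(
            messages=[{"role": "user", "content": "Say hello in five words."}],
            system="You are a terse assistant.",
            max_tokens=256,
        )
    print(result["content"])
    # Cost is estimated from the adapter's MODEL_PRICING table.
    print(f"${result['cost']:.4f} ({result['tokens_in']} in, {result['tokens_out']} out)")


if __name__ == "__main__":
    asyncio.run(main())
```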
smartify/agents/adapters/openai.py ADDED
@@ -0,0 +1,289 @@
+ """OpenAI GPT LLM adapter for Smartify.
+
+ Implements the LLMAdapter protocol using the OpenAI Python SDK.
+ Supports GPT-4o, GPT-4, GPT-4 Turbo, and other OpenAI models.
+ """
+
+ import os
+ import json
+ import logging
+ from typing import Any, Dict, List, Optional
+
+ from openai import AsyncOpenAI
+
+
+ logger = logging.getLogger(__name__)
+
+
+ # Token pricing per million tokens (as of early 2026, check for updates)
+ MODEL_PRICING = {
+     # GPT-4o family
+     "gpt-4o": {"input": 2.50, "output": 10.00},
+     "gpt-4o-2024-11-20": {"input": 2.50, "output": 10.00},
+     "gpt-4o-2024-08-06": {"input": 2.50, "output": 10.00},
+     "gpt-4o-mini": {"input": 0.15, "output": 0.60},
+     "gpt-4o-mini-2024-07-18": {"input": 0.15, "output": 0.60},
+     # GPT-4 Turbo
+     "gpt-4-turbo": {"input": 10.00, "output": 30.00},
+     "gpt-4-turbo-2024-04-09": {"input": 10.00, "output": 30.00},
+     "gpt-4-turbo-preview": {"input": 10.00, "output": 30.00},
+     # GPT-4
+     "gpt-4": {"input": 30.00, "output": 60.00},
+     "gpt-4-0613": {"input": 30.00, "output": 60.00},
+     # GPT-3.5 Turbo
+     "gpt-3.5-turbo": {"input": 0.50, "output": 1.50},
+     "gpt-3.5-turbo-0125": {"input": 0.50, "output": 1.50},
+     # o1 reasoning models
+     "o1": {"input": 15.00, "output": 60.00},
+     "o1-preview": {"input": 15.00, "output": 60.00},
+     "o1-mini": {"input": 3.00, "output": 12.00},
+ }
+
+ DEFAULT_MODEL = "gpt-4o"
+
+
+ class OpenAIAdapter:
+     """OpenAI GPT LLM adapter.
+
+     Implements the LLMAdapter protocol for use with Smartify's orchestrator.
+
+     Example:
+         adapter = OpenAIAdapter(api_key="sk-...")
+         response = await adapter.complete(
+             messages=[{"role": "user", "content": "Hello!"}],
+             system="You are a helpful assistant."
+         )
+     """
+
+     def __init__(
+         self,
+         api_key: Optional[str] = None,
+         model: str = DEFAULT_MODEL,
+         base_url: Optional[str] = None,
+         timeout: float = 120.0,
+         organization: Optional[str] = None,
+     ):
+         """Initialize the OpenAI adapter.
+
+         Args:
+             api_key: OpenAI API key. Defaults to OPENAI_API_KEY env var.
+             model: Default model to use for completions.
+             base_url: Optional custom API base URL (for Azure, proxies, etc.).
+             timeout: Request timeout in seconds.
+             organization: Optional OpenAI organization ID.
+         """
+         self.api_key = api_key or os.environ.get("OPENAI_API_KEY")
+         if not self.api_key:
+             raise ValueError(
+                 "OpenAI API key required. Set OPENAI_API_KEY or pass api_key."
+             )
+
+         self.model = model
+         self.timeout = timeout
+
+         self._client = AsyncOpenAI(
+             api_key=self.api_key,
+             base_url=base_url,
+             timeout=timeout,
+             organization=organization,
+         )
+
+     async def complete(
+         self,
+         messages: List[Dict[str, str]],
+         system: Optional[str] = None,
+         temperature: float = 0.7,
+         max_tokens: Optional[int] = None,
+         tools: Optional[List[Dict]] = None,
+         model: Optional[str] = None,
+     ) -> Dict[str, Any]:
+         """Generate a completion from GPT.
+
+         Args:
+             messages: List of message dicts with 'role' and 'content' keys.
+             system: Optional system prompt.
+             temperature: Sampling temperature (0-2 for OpenAI).
+             max_tokens: Maximum tokens to generate. Defaults to 4096.
+             tools: Optional list of tool definitions for function calling.
+             model: Override the default model for this request.
+
+         Returns:
+             Dict containing:
+                 - content: The assistant's response text
+                 - tool_calls: List of tool calls if any
+                 - tokens_in: Input token count
+                 - tokens_out: Output token count
+                 - cost: Estimated cost in USD
+                 - model: Model used
+                 - stop_reason: Why generation stopped
+         """
+         use_model = model or self.model
+         max_tokens = max_tokens or 4096
+
+         # Build messages with system prompt prepended
+         api_messages = self._convert_messages(messages, system)
+
+         # Build request kwargs (max_tokens for chat completions API)
+         kwargs: Dict[str, Any] = {
+             "model": use_model,
+             "messages": api_messages,
+             "max_tokens": max_tokens,
+             "temperature": temperature,
+         }
+
+         if tools:
+             kwargs["tools"] = self._convert_tools(tools)
+             kwargs["tool_choice"] = "auto"
+
+         logger.debug(f"OpenAI request: model={use_model}, messages={len(api_messages)}")
+
+         response = await self._client.chat.completions.create(**kwargs)
+
+         # Extract content
+         choice = response.choices[0]
+         content_text = choice.message.content or ""
+         tool_calls = []
+
+         if choice.message.tool_calls:
+             for tc in choice.message.tool_calls:
+                 # Parse arguments - OpenAI returns JSON string
+                 try:
+                     args = json.loads(tc.function.arguments)
+                 except json.JSONDecodeError:
+                     args = {"raw": tc.function.arguments}
+
+                 tool_calls.append({
+                     "id": tc.id,
+                     "name": tc.function.name,
+                     "arguments": args,
+                 })
+
+         # Calculate cost
+         tokens_in = response.usage.prompt_tokens if response.usage else 0
+         tokens_out = response.usage.completion_tokens if response.usage else 0
+         cost = self._calculate_cost(use_model, tokens_in, tokens_out)
+
+         # Map finish_reason to consistent format
+         stop_reason = self._map_stop_reason(choice.finish_reason)
+
+         logger.debug(
+             f"OpenAI response: tokens_in={tokens_in}, tokens_out={tokens_out}, "
+             f"cost=${cost:.4f}, stop_reason={stop_reason}"
+         )
+
+         return {
+             "content": content_text,
+             "tool_calls": tool_calls if tool_calls else None,
+             "tokens_in": tokens_in,
+             "tokens_out": tokens_out,
+             "cost": cost,
+             "model": use_model,
+             "stop_reason": stop_reason,
+         }
+
+     def _convert_messages(
+         self,
+         messages: List[Dict[str, str]],
+         system: Optional[str] = None
+     ) -> List[Dict[str, Any]]:
+         """Convert generic messages to OpenAI format.
+
+         OpenAI accepts system messages in the messages array.
+         """
+         converted = []
+
+         # Prepend system message if provided
+         if system:
+             converted.append({"role": "system", "content": system})
+
+         for msg in messages:
+             role = msg.get("role", "user")
+             content = msg.get("content", "")
+
+             # Handle tool results
+             if role == "tool" or msg.get("tool_call_id"):
+                 converted.append({
+                     "role": "tool",
+                     "tool_call_id": msg.get("tool_call_id", ""),
+                     "content": content,
+                 })
+             else:
+                 converted.append({"role": role, "content": content})
+
+         return converted
+
+     def _convert_tools(self, tools: List[Dict]) -> List[Dict[str, Any]]:
+         """Convert generic tool definitions to OpenAI format.
+
+         OpenAI expects:
+             {
+                 "type": "function",
+                 "function": {
+                     "name": "tool_name",
+                     "description": "What the tool does",
+                     "parameters": { JSON Schema }
+                 }
+             }
+         """
+         converted = []
+
+         for tool in tools:
+             if tool.get("type") == "function":
+                 # Already in OpenAI format
+                 converted.append(tool)
+             elif "name" in tool:
+                 # Convert from Anthropic-style format
+                 converted.append({
+                     "type": "function",
+                     "function": {
+                         "name": tool.get("name", ""),
+                         "description": tool.get("description", ""),
+                         "parameters": tool.get("input_schema", tool.get("parameters", {
+                             "type": "object",
+                             "properties": {}
+                         })),
+                     },
+                 })
+
+         return converted
+
+     def _map_stop_reason(self, finish_reason: Optional[str]) -> str:
+         """Map OpenAI finish_reason to consistent stop_reason format."""
+         mapping = {
+             "stop": "end_turn",
+             "length": "max_tokens",
+             "tool_calls": "tool_use",
+             "content_filter": "content_filter",
+             "function_call": "tool_use",  # Legacy
+         }
+         return mapping.get(finish_reason or "stop", finish_reason or "end_turn")
+
+     def _calculate_cost(self, model: str, tokens_in: int, tokens_out: int) -> float:
+         """Calculate estimated cost in USD."""
+         # Find pricing - check exact match first, then prefix match
+         pricing = MODEL_PRICING.get(model)
+
+         if not pricing:
+             # Try prefix matching for dated model versions
+             for model_key in MODEL_PRICING:
+                 if model.startswith(model_key):
+                     pricing = MODEL_PRICING[model_key]
+                     break
+
+         if not pricing:
+             pricing = MODEL_PRICING[DEFAULT_MODEL]
+
+         cost_in = (tokens_in / 1_000_000) * pricing["input"]
+         cost_out = (tokens_out / 1_000_000) * pricing["output"]
+
+         return cost_in + cost_out
+
+     async def close(self) -> None:
+         """Close the underlying HTTP client."""
+         await self._client.close()
+
+     async def __aenter__(self) -> "OpenAIAdapter":
+         return self
+
+     async def __aexit__(self, *args) -> None:
+         await self.close()
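The OpenAI adapter accepts OpenAI-style tool definitions unchanged and converts Anthropic-style ones via `_convert_tools`. A tool-calling sketch, assuming OPENAI_API_KEY is set in the environment — the weather tool is a made-up example; the tool schema shape and result handling follow the code above:

```python
import asyncio

from smartify.agents.adapters import OpenAIAdapter

# Hypothetical tool definition in the OpenAI style that _convert_tools
# passes through unchanged.
WEATHER_TOOL = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}


async def main() -> None:
    async with OpenAIAdapter(model="gpt-4o-mini") as adapter:
        result = await adapter.complete(
            messages=[{"role": "user", "content": "What's the weather in Oslo?"}],
            tools=[WEATHER_TOOL],
        )
    # On a tool call, `content` may be empty and `stop_reason` is "tool_use"
    # (mapped from OpenAI's "tool_calls" by _map_stop_reason).
    for call in result["tool_calls"] or []:
        print(call["name"], call["arguments"])  # arguments already JSON-decoded


if __name__ == "__main__":
    asyncio.run(main())
```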
smartify/api/__init__.py ADDED
@@ -0,0 +1,26 @@
+ """Smartify HTTP API.
+
+ FastAPI-based REST API for grid management and execution.
+ """
+
+ from smartify.api.server import app, run_server
+ from smartify.api.auth import (
+     AuthConfig,
+     AuthMiddleware,
+     generate_api_key,
+     hash_api_key,
+     verify_api_key,
+     get_auth_config,
+ )
+
+ __all__ = [
+     "app",
+     "run_server",
+     # Auth
+     "AuthConfig",
+     "AuthMiddleware",
+     "generate_api_key",
+     "hash_api_key",
+     "verify_api_key",
+     "get_auth_config",
+ ]
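The docstring identifies `app` as a FastAPI application, so it can be served with any ASGI server. Since `server.py`'s body is not included in this diff, the signature of the exported `run_server` helper is unknown; the sketch below therefore serves `app` with uvicorn directly, with illustrative host/port values:

```python
# Serving sketch: smartify.api exports a FastAPI `app`, so an ASGI server
# works directly. run_server's signature is not shown in this diff, so it
# is deliberately not used here.
import uvicorn

from smartify.api import app

if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=8000)
```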