dialetica-1.0.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dialetica/__init__.py +55 -0
- dialetica/client.py +791 -0
- dialetica/models.py +336 -0
- dialetica-1.0.2.dist-info/METADATA +113 -0
- dialetica-1.0.2.dist-info/RECORD +7 -0
- dialetica-1.0.2.dist-info/WHEEL +5 -0
- dialetica-1.0.2.dist-info/top_level.txt +1 -0
dialetica/__init__.py
ADDED
@@ -0,0 +1,55 @@
"""
Dialetica AI SDK - Public API

This package provides the public API for the Dialetica AI SDK.
Only the client and models are exposed - internal implementation details are kept private.

Example usage:
    from dialetica import Dialetica, AgentRequest, ContextRequest, MessageRequest

    client = Dialetica(api_key="dai_your_api_key")
    agent = client.agents.create(AgentRequest(name="Assistant", model="gpt-4o"))
"""
# Import from local modules (self-contained public API)
from .client import Dialetica
from .models import (
    # Request models (what users send)
    AgentRequest,
    ContextRequest,
    KnowledgeRequest,
    MessageRequest,
    ToolConfigRequest,
    CronRequest,
    # Response models (what users receive)
    AgentResponse,
    ContextResponse,
    KnowledgeResponse,
    MessageResponse,
    ToolConfigResponse,
    CronResponse,
    UsageSummary,
    RouteResponse,
)

__version__ = "1.0.2"
__all__ = [
    # Client
    "Dialetica",
    # Request models
    "AgentRequest",
    "ContextRequest",
    "KnowledgeRequest",
    "MessageRequest",
    "ToolConfigRequest",
    "CronRequest",
    # Response models
    "AgentResponse",
    "ContextResponse",
    "KnowledgeResponse",
    "MessageResponse",
    "ToolConfigResponse",
    "CronResponse",
    "UsageSummary",
    "RouteResponse",
]
dialetica/client.py
ADDED
@@ -0,0 +1,791 @@
"""
Python client SDK for the Dialetica AI API

Example usage:
    # Initialize with explicit API key
    client = Dialetica(api_key="dai_your_api_key_here")

    # Initialize with environment variable (recommended)
    # Set DIALETICA_AI_API_KEY in your environment
    client = Dialetica()

    # Custom base URL
    client = Dialetica(base_url="https://api.dialetica.ai")
"""
import os
import requests
from typing import List, Optional, Dict, Any, AsyncIterator
from datetime import datetime
import json
import httpx

from .models import (
    AgentRequest, AgentResponse,
    ContextRequest, ContextResponse,
    KnowledgeRequest, KnowledgeResponse,
    MessageRequest, MessageResponse,
    CronRequest, CronResponse,
    RouteResponse,
    ToolConfigRequest, ToolConfigResponse
)


class Dialetica:
    """
    Dialetica AI Client

    The client handles authentication and API communication with the Dialetica AI API.
    API keys should be kept secure and never committed to version control.

    Best Practices:
    - Store API keys in environment variables
    - Never hardcode API keys in source code
    - Use different API keys for different environments (dev, staging, prod)
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        base_url: str = "http://localhost:8000"
    ):
        """
        Initialize the Dialetica AI client.

        Args:
            api_key: Your API key. If not provided, will look for the DIALETICA_AI_API_KEY
                environment variable, following the pattern of the OpenAI and Anthropic SDKs.
            base_url: The base URL for the API. Defaults to localhost:8000 for development.

        Raises:
            ValueError: If no API key is provided and DIALETICA_AI_API_KEY is not set.

        Example:
            >>> # Using environment variable (recommended)
            >>> import os
            >>> os.environ["DIALETICA_AI_API_KEY"] = "dai_your_api_key"
            >>> client = Dialetica()

            >>> # Using explicit API key (not recommended for production)
            >>> client = Dialetica(api_key="dai_your_api_key")
        """
        # Follow OpenAI/Anthropic pattern: try explicit key, then environment variable
        self.api_key = api_key or os.environ.get("DIALETICA_AI_API_KEY")

        if not self.api_key:
            raise ValueError(
                "No API key provided. Either pass api_key parameter or set "
                "DIALETICA_AI_API_KEY environment variable. "
                "Get your API key from your dashboard."
            )

        self.base_url = base_url
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

        # Initialize sub-clients
        self._setup_clients()

    def _make_request(self, method: str, endpoint: str, data: Optional[Dict] = None) -> Dict:
        """Make HTTP request to the API"""
        url = f"{self.base_url}/v1{endpoint}"

        try:
            if method.upper() == "GET":
                response = requests.get(url, headers=self.headers)
            elif method.upper() == "POST":
                response = requests.post(url, headers=self.headers, json=data)
            elif method.upper() == "PUT":
                response = requests.put(url, headers=self.headers, json=data)
            elif method.upper() == "DELETE":
                response = requests.delete(url, headers=self.headers)
            else:
                raise ValueError(f"Unsupported HTTP method: {method}")

            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            raise Exception(f"API request failed: {str(e)}")

    class AgentsClient:
        """Client for agent operations"""

        def __init__(self, client: 'Dialetica'):
            self.client = client

        def create(self, agent: AgentRequest) -> AgentResponse:
            """
            Create a single agent.

            Args:
                agent: AgentRequest object (configuration only)

            Returns:
                AgentResponse with all fields

            Example:
                agent = AgentRequest(
                    name="Customer Support",
                    description="Handles inquiries",
                    instructions=["Be helpful"],
                    model="gpt-4o"
                )
                created = client.agents.create(agent)
            """
            response = self.client._make_request("POST", "/agents", agent.model_dump(mode="json"))
            return AgentResponse(**response)

        def bulk_create(self, agents: List[AgentRequest]) -> List[AgentResponse]:
            """Create multiple agents at once"""
            created_agents = []
            for agent in agents:
                response = self.client._make_request("POST", "/agents", agent.model_dump(mode="json"))
                created_agents.append(AgentResponse(**response))
            return created_agents

        def get(self, agent_id: str) -> Optional[AgentResponse]:
            """Get an agent by ID"""
            try:
                response = self.client._make_request("GET", f"/agents/{agent_id}")
                return AgentResponse(**response)
            except Exception:
                return None

        def list(self) -> List[AgentResponse]:
            """List all agents"""
            response = self.client._make_request("GET", "/agents")
            return [AgentResponse(**agent_data) for agent_data in response]

        def update(self, agent_id: str, agent: AgentRequest) -> Optional[AgentResponse]:
            """Update an agent"""
            try:
                response = self.client._make_request("PUT", f"/agents/{agent_id}", agent.model_dump(mode="json"))
                return AgentResponse(**response)
            except Exception:
                return None

        def delete(self, agent_id: str) -> bool:
            """Delete an agent"""
            try:
                self.client._make_request("DELETE", f"/agents/{agent_id}")
                return True
            except Exception:
                return False

    class ContextsClient:
        """Client for context operations"""

        def __init__(self, client: 'Dialetica'):
            self.client = client

        def create(self, context: ContextRequest) -> ContextResponse:
            """
            Create a single context.

            Args:
                context: ContextRequest object

            Returns:
                ContextResponse with all fields

            Example:
                context = ContextRequest(
                    name="Customer Support",
                    description="Multi-agent support",
                    agents=[agent1.id, agent2.id],
                    knowledge=[policy.id]
                )
                created = client.contexts.create(context)
            """
            response = self.client._make_request("POST", "/contexts", context.model_dump(mode="json"))
            return ContextResponse(**response)

        def bulk_create(self, contexts: List[ContextRequest]) -> List[ContextResponse]:
            """Create multiple contexts at once"""
            created_contexts = []
            for context in contexts:
                response = self.client._make_request("POST", "/contexts", context.model_dump(mode="json"))
                created_contexts.append(ContextResponse(**response))
            return created_contexts

        def get(self, context_id: str) -> Optional[ContextResponse]:
            """Get a context by ID"""
            try:
                response = self.client._make_request("GET", f"/contexts/{context_id}")
                return ContextResponse(**response)
            except Exception:
                return None

        def list(self) -> List[ContextResponse]:
            """List all contexts"""
            response = self.client._make_request("GET", "/contexts")
            return [ContextResponse(**context_data) for context_data in response]

        def update(self, context_id: str, context: ContextRequest) -> Optional[ContextResponse]:
            """Update a context"""
            try:
                response = self.client._make_request("PUT", f"/contexts/{context_id}", context.model_dump(mode="json"))
                return ContextResponse(**response)
            except Exception:
                return None

        def delete(self, context_id: str) -> bool:
            """Delete a context"""
            try:
                self.client._make_request("DELETE", f"/contexts/{context_id}")
                return True
            except Exception:
                return False

        def run(self, context: ContextResponse, messages: List[MessageRequest]) -> Optional[List[MessageResponse]]:
            """
            Run a context with messages.

            Args:
                context: The context to run
                messages: List of MessageRequest objects (minimal fields: role, sender_name, content)

            Returns:
                List of MessageResponse objects (includes metadata, excludes embeddings)

            Example:
                msg = MessageRequest(role="user", sender_name="Alice", content="Hello!")
                responses = client.contexts.run(context, [msg])
                for resp in responses:
                    print(f"{resp.sender_name}: {resp.content}")
            """
            try:
                data = {"messages": [msg.model_dump(mode="json") for msg in messages]}
                response = self.client._make_request("POST", f"/contexts/{context.id}/run", data)
                return [MessageResponse(**msg_data) for msg_data in response]
            except Exception:
                return []

        async def run_streamed(self, context: ContextResponse, messages: List[MessageRequest]) -> AsyncIterator[Dict[str, Any]]:
            """
            Run a context with messages and stream the response in real-time using SSE.

            This method streams events as the agent processes the request, providing:
            - Token-by-token content streaming
            - Real-time tool call visibility
            - Progress updates
            - Agent handoff notifications (multi-agent contexts)

            Args:
                context: The context to run
                messages: List of MessageRequest objects

            Yields:
                Dict[str, Any]: Stream events with different types:
                - context_started: Context execution begins
                - agent_started: Agent starts processing
                - content_delta: Token-by-token content (the main content stream)
                - run_item: Tool calls, completions
                - agent_updated: Agent handoff occurred
                - agent_completed: Agent finished with complete response
                - context_completed: Context execution finished
                - error: An error occurred

            Example (simple):
                msg = MessageRequest(role="user", sender_name="User", content="Hello!")
                async for event in client.contexts.run_streamed(context, [msg]):
                    if event["type"] == "content_delta":
                        print(event["delta"], end="", flush=True)
                    elif event["type"] == "error":
                        print(f"Error: {event['error']}")

            Example (full event handling):
                async for event in client.contexts.run_streamed(context, [msg]):
                    event_type = event.get("type")

                    if event_type == "context_started":
                        print(f"🎬 Context: {event['context_name']}")

                    elif event_type == "agent_started":
                        print(f"🤖 Agent: {event['agent_name']}")

                    elif event_type == "content_delta":
                        print(event["delta"], end="", flush=True)

                    elif event_type == "agent_completed":
                        print(f"\\n✅ Complete: {len(event['content'])} chars")

                    elif event_type == "error":
                        print(f"❌ Error: {event['error']}")

            Example (cancellation):
                # To cancel streaming, simply break out of the loop or cancel the task
                try:
                    async for event in client.contexts.run_streamed(context, [msg]):
                        if event["type"] == "content_delta":
                            print(event["delta"], end="", flush=True)
                        if some_cancel_condition:
                            break  # Automatically closes connection and cancels backend processing
                except asyncio.CancelledError:
                    print("Stream cancelled")

            Note:
                - This is an async generator and must be used with 'async for'.
                - Requires the httpx library for async HTTP streaming.
                - The stream can be cancelled by breaking out of the loop or cancelling the asyncio task.
                - When cancelled, the HTTP connection is closed and backend processing stops.
            """
            url = f"{self.client.base_url}/v1/contexts/{context.id}/run/stream"
            data = {"messages": [msg.model_dump(mode="json") for msg in messages]}

            async with httpx.AsyncClient(timeout=60.0) as http_client:
                async with http_client.stream(
                    "POST",
                    url,
                    headers=self.client.headers,
                    json=data
                ) as response:
                    if response.status_code != 200:
                        error_text = await response.aread()
                        yield {
                            "type": "error",
                            "error": f"HTTP {response.status_code}: {error_text.decode()}",
                            "error_type": "HTTPError"
                        }
                        return

                    async for line in response.aiter_lines():
                        if line.startswith("data: "):
                            try:
                                event = json.loads(line[6:])  # Remove "data: " prefix
                                yield event
                            except json.JSONDecodeError as e:
                                yield {
                                    "type": "error",
                                    "error": f"Failed to parse event: {e}",
                                    "error_type": "JSONDecodeError"
                                }

        def get_history(self, context: ContextResponse, sender_name: Optional[str] = None) -> List[MessageResponse]:
            """
            Get conversation history for a context.

            Args:
                context: The context to get history from
                sender_name: Optional filter by sender name

            Returns:
                List of MessageResponse objects (excludes embeddings for performance)

            Example:
                history = client.contexts.get_history(context)
                for msg in history:
                    print(f"[{msg.timestamp}] {msg.sender_name}: {msg.content}")
            """
            try:
                params = {}
                if sender_name:
                    params["sender_name"] = sender_name

                url = f"/contexts/{context.id}/history"
                if params:
                    url += "?" + "&".join([f"{k}={v}" for k, v in params.items()])

                response = self.client._make_request("GET", url)
                return [MessageResponse(**msg_data) for msg_data in response["messages"]]
            except Exception:
                return []

        def route(self, context_id: str, messages: List[MessageRequest]) -> Optional[RouteResponse]:
            """
            Route messages in a context to determine the next speaker.

            This method analyzes the conversation history and uses the orchestrator
            to determine which agent (or user) should speak next based on the context
            configuration and conversation flow.

            Args:
                context_id: The ID of the context to route messages in
                messages: List of MessageRequest objects representing the conversation history

            Returns:
                RouteResponse containing the name of the next speaker, or None on error

            Example:
                messages = [
                    MessageRequest(
                        role="user",
                        sender_name="Alice",
                        content="I need help with billing"
                    )
                ]
                result = client.contexts.route("context-uuid", messages)
                if result:
                    print(f"Next speaker: {result.next_speaker}")
                    # Output might be: "Next speaker: Billing Agent"
            """
            try:
                data = [msg.model_dump(mode="json") for msg in messages]
                response = self.client._make_request("POST", f"/contexts/{context_id}/route", data)
                return RouteResponse(**response)
            except Exception:
                return None

    class CronsClient:
        """Client for cron job operations"""

        def __init__(self, client: 'Dialetica'):
            self.client = client

        def create(self, cron: CronRequest) -> CronResponse:
            """Create a new cron job"""
            response = self.client._make_request("POST", "/crons", cron.model_dump(mode="json"))
            return CronResponse(**response)

        def get(self, cron_id: str) -> Optional[CronResponse]:
            """Get a cron job by ID"""
            try:
                response = self.client._make_request("GET", f"/crons/{cron_id}")
                return CronResponse(**response)
            except Exception:
                return None

        def list(self) -> List[CronResponse]:
            """List all cron jobs for the authenticated user"""
            response = self.client._make_request("GET", "/crons")
            return [CronResponse(**cron_data) for cron_data in response]

        def update(self, cron_id: str, cron: CronRequest) -> Optional[CronResponse]:
            """Update a cron job"""
            try:
                response = self.client._make_request("PUT", f"/crons/{cron_id}", cron.model_dump(mode="json"))
                return CronResponse(**response)
            except Exception:
                return None

        def delete(self, cron_id: str) -> bool:
            """Delete a cron job"""
            try:
                self.client._make_request("DELETE", f"/crons/{cron_id}")
                return True
            except Exception:
                return False

    class KnowledgeClient:
        """Client for knowledge operations (unified recipes/memories/facts)"""

        def __init__(self, client: 'Dialetica'):
            self.client = client

        def create(self, knowledge: KnowledgeRequest) -> KnowledgeResponse:
            """
            Create a single knowledge entry.

            Args:
                knowledge: KnowledgeRequest object (minimal fields)

            Returns:
                KnowledgeResponse (excludes embedding)

            Example:
                knowledge = KnowledgeRequest(
                    knowledge="Always validate user input",
                    metadata={"category": "security"}
                )
                created = client.knowledge.create(knowledge)
                print(f"Created knowledge: {created.id}")
            """
            response = self.client._make_request("POST", "/knowledge", knowledge.model_dump(mode="json"))
            return KnowledgeResponse(**response)

        def bulk_create(self, knowledge_items: List[KnowledgeRequest]) -> List[KnowledgeResponse]:
            """
            Create multiple knowledge entries at once.

            Args:
                knowledge_items: List of KnowledgeRequest objects

            Returns:
                List of KnowledgeResponse objects (excludes embeddings)
            """
            created_knowledge = []
            for knowledge in knowledge_items:
                response = self.client._make_request("POST", "/knowledge", knowledge.model_dump(mode="json"))
                created_knowledge.append(KnowledgeResponse(**response))
            return created_knowledge

        def get(self, knowledge_id: str) -> Optional[KnowledgeResponse]:
            """
            Get knowledge by ID.

            Returns:
                KnowledgeResponse (excludes embedding)
            """
            try:
                response = self.client._make_request("GET", f"/knowledge/{knowledge_id}")
                return KnowledgeResponse(**response)
            except Exception:
                return None

        def list(self) -> List[KnowledgeResponse]:
            """
            List all knowledge.

            Returns:
                List of KnowledgeResponse objects (excludes embeddings)
            """
            response = self.client._make_request("GET", "/knowledge")
            return [KnowledgeResponse(**knowledge_data) for knowledge_data in response]

        def update(self, knowledge_id: str, knowledge: KnowledgeRequest) -> Optional[KnowledgeResponse]:
            """
            Update knowledge.

            Args:
                knowledge_id: ID of knowledge to update
                knowledge: KnowledgeRequest object with updated data

            Returns:
                KnowledgeResponse (excludes embedding)
            """
            try:
                response = self.client._make_request("PUT", f"/knowledge/{knowledge_id}", knowledge.model_dump(mode="json"))
                return KnowledgeResponse(**response)
            except Exception:
                return None

        def delete(self, knowledge_id: str) -> bool:
            """Delete knowledge"""
            try:
                self.client._make_request("DELETE", f"/knowledge/{knowledge_id}")
                return True
            except Exception:
                return False

        def get_for_context(self, context_id: str, agent_id: Optional[str] = None) -> List[KnowledgeResponse]:
            """
            Get knowledge for a context.

            Args:
                context_id: The context ID
                agent_id: Optional agent ID to filter for agent-visible knowledge only

            Returns:
                List of KnowledgeResponse objects

            Example:
                # Get all knowledge in a context
                all_knowledge = client.knowledge.get_for_context(context_id)

                # Get knowledge visible to a specific agent
                agent_knowledge = client.knowledge.get_for_context(context_id, agent_id)
            """
            try:
                url = f"/contexts/{context_id}/knowledge"
                if agent_id:
                    url += f"?agent_id={agent_id}"
                response = self.client._make_request("GET", url)
                return [KnowledgeResponse(**k) for k in response]
            except Exception:
                return []

        def create_for_context(
            self,
            context_id: str,
            knowledge: str,
            agent_id: Optional[str] = None,
            metadata: Optional[Dict[str, Any]] = None
        ) -> KnowledgeResponse:
            """
            Helper to create knowledge for a context.

            Args:
                context_id: The context ID
                knowledge: The knowledge content
                agent_id: If provided, creates agent-specific knowledge; if None, creates context-wide knowledge
                metadata: Optional metadata

            Returns:
                KnowledgeResponse

            Example:
                # Create context-wide knowledge (all agents see)
                knowledge = client.knowledge.create_for_context(
                    context_id=context.id,
                    knowledge="All agents should validate input"
                )

                # Create agent-specific knowledge
                knowledge = client.knowledge.create_for_context(
                    context_id=context.id,
                    agent_id=agent.id,
                    knowledge="This agent remembers user preferences"
                )
            """
            knowledge_req = KnowledgeRequest(
                knowledge=knowledge,
                context_id=context_id,
                agent_id=agent_id,
                metadata=metadata
            )
            return self.create(knowledge_req)

        def query(
            self,
            query: str,
            context_id: Optional[str] = None,
            agent_id: Optional[str] = None,
            limit: int = 5
        ) -> List[KnowledgeResponse]:
            """
            Query knowledge in a context using semantic search.

            Args:
                query: Search query string
                context_id: The context to search in
                agent_id: If provided, searches knowledge visible to that agent
                limit: Maximum number of results (default 5)

            Returns:
                List of KnowledgeResponse objects ranked by semantic similarity

            Example:
                # Search all context-wide knowledge
                results = client.knowledge.query(
                    context_id=context.id,
                    query="email validation rules"
                )

                # Search knowledge visible to a specific agent
                results = client.knowledge.query(
                    context_id=context.id,
                    query="user preferences",
                    agent_id=agent.id
                )
            """
            try:
                url = f"/contexts/{context_id}/knowledge/query?query={query}&limit={limit}"
                if agent_id:
                    url += f"&agent_id={agent_id}"
                response = self.client._make_request("GET", url)
                return [KnowledgeResponse(**k) for k in response]
            except Exception:
                return []

    class ToolsClient:
        """Client for tool configuration operations"""

        def __init__(self, client: 'Dialetica'):
            self.client = client

        def create(self, tool_config: ToolConfigRequest) -> ToolConfigResponse:
            """
            Create a new tool configuration (MCP server connection).

            Args:
                tool_config: ToolConfigRequest with name, endpoint, auth_token, type, etc.

            Returns:
                ToolConfigResponse with the created tool configuration

            Example:
                tool = ToolConfigRequest(
                    name="Notion MCP",
                    description="Access Notion databases",
                    endpoint="https://mcp.notion.com/mcp",
                    auth_token="ntn_secret_...",
                    type="streamable_http"
                )
                created = client.tools.create(tool)
            """
            response = self.client._make_request("POST", "/tool-configs", tool_config.model_dump(mode="json"))
            return ToolConfigResponse(**response)

        def list(self) -> List[ToolConfigResponse]:
            """
            List all tool configurations for the authenticated user.

            Returns:
                List of ToolConfigResponse objects

            Example:
                tools = client.tools.list()
                for tool in tools:
                    print(f"{tool.name}: {tool.endpoint}")
            """
            response = self.client._make_request("GET", "/tool-configs")
            return [ToolConfigResponse(**config) for config in response]

        def get(self, tool_config_id: str) -> Optional[ToolConfigResponse]:
            """
            Get a specific tool configuration by ID.

            Args:
                tool_config_id: The tool configuration ID

            Returns:
                ToolConfigResponse or None if not found

            Example:
                tool = client.tools.get("tool-config-uuid")
                if tool:
                    print(f"Tool: {tool.name}")
            """
            try:
                response = self.client._make_request("GET", f"/tool-configs/{tool_config_id}")
                return ToolConfigResponse(**response)
            except Exception:
                return None

        def update(self, tool_config_id: str, tool_config: ToolConfigRequest) -> Optional[ToolConfigResponse]:
            """
            Update an existing tool configuration.

            Args:
                tool_config_id: The tool configuration ID to update
                tool_config: Updated ToolConfigRequest

            Returns:
                Updated ToolConfigResponse or None if not found

            Example:
                updated = ToolConfigRequest(
                    name="Notion MCP Updated",
                    description="Updated description",
                    endpoint="https://mcp.notion.com/mcp",
                    auth_token="new_token",
                    type="streamable_http"
                )
                result = client.tools.update(tool_id, updated)
            """
            try:
                response = self.client._make_request("PUT", f"/tool-configs/{tool_config_id}", tool_config.model_dump(mode="json"))
                return ToolConfigResponse(**response)
            except Exception:
                return None

        def delete(self, tool_config_id: str) -> bool:
            """
            Delete a tool configuration.

            Args:
                tool_config_id: The tool configuration ID to delete

            Returns:
                True if successful, False otherwise

            Example:
                success = client.tools.delete("tool-config-uuid")
                if success:
                    print("Tool config deleted")
            """
            try:
                self.client._make_request("DELETE", f"/tool-configs/{tool_config_id}")
                return True
            except Exception:
                return False

    # Initialize sub-clients when Dialetica is instantiated
    def _setup_clients(self):
        """Initialize all sub-clients for the SDK"""
        self.agents = self.AgentsClient(self)
        self.contexts = self.ContextsClient(self)
        self.knowledge = self.KnowledgeClient(self)
        self.crons = self.CronsClient(self)
        self.tools = self.ToolsClient(self)
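For reference, `run_streamed` above is an async generator and has to be driven from an event loop; a minimal consumer sketch (illustrative only, not part of the package; it assumes `DIALETICA_AI_API_KEY` is set and that at least one context already exists):

```python
# Minimal asyncio consumer for the SSE streaming API sketched above.
import asyncio

from dialetica import Dialetica, MessageRequest


async def main() -> None:
    client = Dialetica()  # reads DIALETICA_AI_API_KEY from the environment
    context = client.contexts.list()[0]  # assumes at least one context exists
    msg = MessageRequest(role="user", sender_name="User", content="Hello!")

    async for event in client.contexts.run_streamed(context, [msg]):
        if event["type"] == "content_delta":
            print(event["delta"], end="", flush=True)  # token-by-token content
        elif event["type"] == "agent_completed":
            print()  # newline after each complete agent response
        elif event["type"] == "error":
            print(f"\nError: {event['error']}")
            break


if __name__ == "__main__":
    asyncio.run(main())
```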
dialetica/models.py
ADDED
@@ -0,0 +1,336 @@
"""
Public data models for the Dialetica AI SDK

This module contains only the Request and Response models that are part of the public API.
Internal models are kept private in the foundation package.
"""
from __future__ import annotations
from datetime import datetime
from typing import List, Optional, Dict, Any, Literal
from pydantic import BaseModel, Field


# ==================== MESSAGE MODELS ====================

class MessageRequest(BaseModel):
    """
    Message creation request model.

    This is what SDK users provide when sending messages.
    Only includes the essential fields needed to create a message.

    Example:
        msg = MessageRequest(
            role="user",
            sender_name="Alice",
            content="Hello!"
        )
    """
    role: str = Field(..., pattern=r'^(user|assistant)$', description="Message role: 'user' or 'assistant'")
    sender_name: str = Field(..., min_length=1, max_length=100, description="Name of the message sender")
    content: str = Field(..., min_length=1, description="Message content")


class MessageResponse(BaseModel):
    """
    Message response model - returned to API users.

    Includes metadata fields but excludes internal implementation details
    like embeddings (which can be 12KB+ per message).

    This keeps API responses clean and performant.
    """
    id: str
    context_id: str
    sender_id: str
    timestamp: datetime
    role: str
    sender_name: str
    content: str
    # Note: embedding field is intentionally excluded


# ==================== KNOWLEDGE MODELS ====================

class KnowledgeRequest(BaseModel):
    """
    Knowledge creation request model.

    This is what SDK users provide when creating knowledge entries.
    """
    knowledge: str = Field(..., min_length=1, description="Knowledge content/instruction")
    context_id: Optional[str] = Field(default=None, description="Context ID (None=user-level)")
    agent_id: Optional[str] = Field(default=None, description="Agent ID for agent-specific knowledge")
    metadata: Optional[Dict[str, Any]] = Field(default=None, description="Flexible metadata")


class KnowledgeResponse(BaseModel):
    """
    Knowledge response model - returned to API users.

    Excludes embedding field to keep responses clean and performant.
    """
    id: str
    creator_id: str
    creator_type: str  # 'user' or 'agent'
    user_id: str
    context_id: Optional[str]
    agent_id: Optional[str]
    knowledge: str
    created_at: datetime
    updated_at: datetime
    metadata: Optional[Dict[str, Any]]
    # Note: embedding field is intentionally excluded


# ==================== AGENT MODELS ====================

class AgentRequest(BaseModel):
    """
    Agent creation request model.

    This is what SDK users provide when creating agents.
    Only includes configuration fields - system fields are set by the server.

    Example:
        agent = AgentRequest(
            name="Customer Support Agent",
            description="Handles customer inquiries",
            instructions=["Be professional", "Be helpful"],
            model="gpt-4o"
        )
    """
    # Core config
    name: str = Field(..., min_length=1, max_length=100, description="Agent name")
    description: str = Field(default="", description="Agent description")
    instructions: List[str] = Field(default_factory=list, description="System instructions for the agent")

    # Model config
    model: str = Field(default="openai/gpt-4o-mini", description="LLM model to use (e.g., 'openai/gpt-4o-mini', 'anthropic/claude-3-haiku-20240307', 'gemini/gemini-2.5-flash', or 'auto' for automatic selection)")
    temperature: float = Field(default=0.7, ge=0.0, le=2.0, description="Model temperature")
    max_tokens: int = Field(default=1000, gt=0, le=100000, description="Max tokens per response")

    # Capabilities
    tools: List[str] = Field(default_factory=list, description="List of ToolConfig UUIDs that this agent can use")


class AgentResponse(BaseModel):
    """
    Agent response model - returned to API users.

    Includes all fields for transparency.
    """
    id: str
    creator_id: Optional[str]  # None for public agents (where creator_id IS NULL)
    name: str
    description: str
    instructions: List[str]
    model: str
    temperature: float
    max_tokens: int
    tools: List[str]  # List of ToolConfig UUIDs
    created_at: datetime
    updated_at: datetime


# ==================== CONTEXT MODELS ====================

class ContextRequest(BaseModel):
    """
    Context creation request model.

    This is what SDK users provide when creating contexts.

    Example:
        context = ContextRequest(
            name="Customer Support Chat",
            description="Multi-agent customer support",
            instructions=["Agent 1 handles technical", "Agent 2 handles billing"],
            agents=[agent1.id, agent2.id],
            is_public=False
        )
    """
    # Core
    name: str = Field(..., min_length=1, max_length=100, description="Context name")
    description: str = Field(default="", description="Context description")
    instructions: List[str] = Field(default_factory=list, description="System instructions for the context")
    context_window_size: int = Field(default=16000, ge=0, description="Context window size in tokens")

    # Participants (UUID arrays)
    agents: List[str] = Field(default_factory=list, description="List of agent IDs")
    users: Optional[List[str]] = Field(default_factory=list, description="List of user IDs")

    # Visibility
    is_public: bool = Field(default=False, description="If true, anyone with the link can access this context")

    def __init__(self, **data):
        agents = []
        for agent in data.get("agents", []):
            if isinstance(agent, str):
                agents.append(agent)
            elif hasattr(agent, "id"):
                agents.append(agent.id)
            else:
                raise ValueError(f"Invalid agent: {agent}")
        data["agents"] = agents
        users = []
        for user in data.get("users", []):
            if isinstance(user, str):
                users.append(user)
            elif hasattr(user, "id"):
                users.append(user.id)
            else:
                raise ValueError(f"Invalid user: {user}")
        data["users"] = users

        super().__init__(**data)


class ContextResponse(BaseModel):
    """
    Context response model - returned to API users.

    Includes all fields for transparency.
    """
    id: str
    creator_id: Optional[str]
    is_public: bool
    name: str
    description: str
    instructions: List[str]
    context_window_size: int
    agents: List[str]
    users: Optional[List[str]]
    created_at: datetime
    updated_at: datetime


class RouteResponse(BaseModel):
    """
    Response model containing the routing decision.

    This model is returned when routing messages in a context. It indicates
    which agent (or user) should speak next based on the conversation flow.

    Attributes:
        next_speaker: The name of the agent or participant who should speak next.
            This can be:
            - An agent name (e.g., "Support Agent", "Billing Agent")
            - "user" if the orchestrator determines a human should respond
            - "none" if the conversation should end

    Example:
        {
            "next_speaker": "Billing Agent"
        }
    """
    next_speaker: str = Field(..., description="Name of the next speaker (agent name, 'user', or 'none')")


# ==================== TOOL CONFIG MODELS ====================

class ToolConfigRequest(BaseModel):
    """
    Tool configuration creation request model.

    This is what SDK users provide when creating tool configurations.

    Example:
        tool_config = ToolConfigRequest(
            name="Notion MCP",
            description="Notion integration via MCP",
            endpoint="https://mcp.notion.com/mcp",
            auth_token="ntn_...",
            type="streamable_http"
        )
    """
    name: str = Field(..., min_length=1, max_length=100, description="Tool configuration name")
    description: Optional[str] = Field(default=None, description="Tool configuration description")
    endpoint: str = Field(..., min_length=1, max_length=500, description="MCP server endpoint URL")
    auth_token: Optional[str] = Field(default=None, description="Authentication token for the MCP server")
    type: Literal["streamable_http", "sse"] = Field(..., description="MCP server type")


class ToolConfigResponse(BaseModel):
    """
    Tool configuration response model - returned to API users.

    Excludes auth_token for security (users should only see if it's set, not the value).
    """
    id: str
    creator_id: Optional[str]  # None for public tool configs (where creator_id IS NULL)
    name: str
    description: Optional[str]
    endpoint: str
    has_auth_token: bool  # Indicates if auth_token is set (but not the value)
    type: str
    created_at: datetime
    updated_at: datetime


# ==================== USAGE TRACKING MODELS ====================

class UsageSummary(BaseModel):
    """
    Usage summary response model - returned to API users.

    Aggregated usage data for dashboard display.
    """
    total_spend: float
    previous_period_spend: float
    total_tokens: int
    total_requests: int
    daily_usage: List[Dict[str, Any]]  # List of daily usage records
    capabilities: List[Dict[str, Any]]  # Breakdown by capability/model


# ==================== CRON MODELS ====================

class CronRequest(BaseModel):
    """
    Cron creation request model.

    This is what SDK users provide when creating cron jobs.

    Example:
        cron = CronRequest(
            name="Daily Report",
            prompt="Generate a daily summary",
            context_id="context-uuid",
            cron_expression="0 9 * * *"  # Daily at 9 AM
        )
        # Or for one-time execution at a specific time:
        cron = CronRequest(
            name="One-time Task",
            prompt="Run this once",
            context_id="context-uuid",
            scheduled_time="2024-12-25T09:00:00Z"  # Specific date/time
        )
    """
    name: str = Field(..., min_length=1, max_length=100, description="Cron job name")
    prompt: str = Field(..., min_length=1, description="Prompt to execute")
    context_id: str = Field(..., description="Context ID where the prompt will be executed")
    cron_expression: Optional[str] = Field(default=None, description="Standard cron expression (e.g., '0 9 * * *' for daily at 9 AM). If None, runs once at scheduled_time.")
    scheduled_time: Optional[datetime] = Field(default=None, description="Scheduled time for one-time execution (used when cron_expression is None). If None and no cron_expression, runs immediately.")


class CronResponse(BaseModel):
    """
    Cron response model - returned to API users.

    Includes all fields for transparency.
    """
    id: str
    creator_id: str
    owner_id: str
    name: str
    prompt: str
    context_id: str
    cron_expression: Optional[str]
    scheduled_time: Optional[datetime]
    cron_next_run: datetime
    cron_last_run: Optional[datetime]
    cron_status: str
    created_at: datetime
    updated_at: datetime
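One detail worth noting in the models above: `ContextRequest` overrides `__init__` to coerce participants to their IDs, so agents and users can be passed either as ID strings or as objects exposing an `.id` attribute (e.g., an `AgentResponse`). A small illustrative sketch (the UUID value is a placeholder):

```python
from datetime import datetime, timezone

from dialetica import AgentResponse, ContextRequest

now = datetime.now(timezone.utc)
agent = AgentResponse(
    id="agent-uuid", creator_id=None, name="Assistant", description="",
    instructions=[], model="openai/gpt-4o-mini", temperature=0.7,
    max_tokens=1000, tools=[], created_at=now, updated_at=now,
)

# Both forms normalize to the same payload: agents == ["agent-uuid"]
req_from_object = ContextRequest(name="Support Chat", agents=[agent])
req_from_id = ContextRequest(name="Support Chat", agents=["agent-uuid"])
assert req_from_object.agents == req_from_id.agents == ["agent-uuid"]
```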
dialetica-1.0.2.dist-info/METADATA
ADDED
@@ -0,0 +1,113 @@
Metadata-Version: 2.4
Name: dialetica
Version: 1.0.2
Summary: Python SDK for Dialetica AI - Multi-agent conversational AI platform
Author-email: Dialetica AI <support@dialetica-ai.com>
License: MIT
Project-URL: Homepage, https://github.com/your-org/dialetica-ai
Project-URL: Documentation, https://docs.dialetica-ai.com
Project-URL: Repository, https://github.com/your-org/dialetica-ai
Project-URL: Issues, https://github.com/your-org/dialetica-ai/issues
Keywords: ai,llm,multi-agent,conversational-ai,dialetica
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Requires-Python: >=3.8
Description-Content-Type: text/markdown
Requires-Dist: pydantic>=2.9.0
Requires-Dist: requests>=2.31.0
Requires-Dist: httpx>=0.24.0
Provides-Extra: dev
Requires-Dist: pytest>=7.0.0; extra == "dev"
Requires-Dist: black>=23.0.0; extra == "dev"
Requires-Dist: mypy>=1.0.0; extra == "dev"

# Dialetica AI Python SDK

Official Python SDK for the Dialetica AI platform - a multi-agent conversational AI system.

## Installation

```bash
pip install dialetica
```

## Quick Start

```python
from dialetica import Dialetica, AgentRequest, ContextRequest, MessageRequest

# Initialize the client
client = Dialetica(api_key="dai_your_api_key_here")
# Or use environment variable: DIALETICA_AI_API_KEY

# Create an agent
agent = client.agents.create(AgentRequest(
    name="Assistant",
    description="Helpful assistant",
    instructions=["Be helpful and concise"],
    model="gpt-4o"
))

# Create a context
context = client.contexts.create(ContextRequest(
    name="Support Chat",
    agents=[agent.id]
))

# Send a message
message = MessageRequest(
    role="user",
    sender_name="User",
    content="Hello!"
)

# Get response
responses = client.contexts.run(context, [message])
for response in responses:
    print(f"{response.sender_name}: {response.content}")
```

## Features

- **Multi-Agent Conversations**: Create contexts with multiple AI agents
- **Knowledge Management**: Store and query knowledge using semantic search (see the sketch below)
- **Streaming Support**: Real-time streaming responses with SSE
- **Type-Safe**: Full Pydantic models for request/response validation
- **Simple API**: Clean, intuitive interface following industry best practices
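The Knowledge Management feature, for example, follows the same client pattern as the Quick Start; a minimal illustrative sketch that reuses the `client` and `context` objects from above (the entry text and query string are made up):

```python
# Create a context-wide knowledge entry (visible to all agents in the context)
entry = client.knowledge.create_for_context(
    context_id=context.id,
    knowledge="Refunds are only issued within 30 days of purchase",
    metadata={"category": "policy"},
)

# Semantic search over the knowledge visible in the context
results = client.knowledge.query(
    context_id=context.id,
    query="refund policy",
    limit=3,
)
for item in results:
    print(item.knowledge)
```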
## Building and Publishing

To build the package:

```bash
cd backend
python -m build
```

To publish to PyPI (test first with TestPyPI):

```bash
# Test on TestPyPI first
python -m twine upload --repository testpypi dist/*

# Then publish to PyPI
python -m twine upload dist/*
```

## Documentation

For full documentation, visit [https://docs.dialetica-ai.com](https://docs.dialetica-ai.com)

## License

MIT License
dialetica-1.0.2.dist-info/RECORD
ADDED
@@ -0,0 +1,7 @@
dialetica/__init__.py,sha256=O0cdYPsYl-udc5KhG79-TIJWJ-LjZvgiwZF55T1I1yw,1380
dialetica/client.py,sha256=r6W8KjOR7sxSxxGgwggt5ZppXzLKkBMguheGeyJE3E4,32967
dialetica/models.py,sha256=Hi5sP4blaErZ7JNzs8QZjqdUjwSXKnwGT7AxdWKBr7s,11806
dialetica-1.0.2.dist-info/METADATA,sha256=cPgldi_QiRp3KVWE2bRH2L8fGFov6de1fiJpcTZaZ-M,3290
dialetica-1.0.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
dialetica-1.0.2.dist-info/top_level.txt,sha256=rsUgpyBZFM6NsJE6uEx2VOsmgZFPPftwL62iNEaSL9E,10
dialetica-1.0.2.dist-info/RECORD,,
dialetica-1.0.2.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
dialetica