march-agent 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- march_agent/__init__.py +52 -0
- march_agent/agent.py +341 -0
- march_agent/agent_state_client.py +149 -0
- march_agent/app.py +416 -0
- march_agent/artifact.py +58 -0
- march_agent/checkpoint_client.py +169 -0
- march_agent/checkpointer.py +16 -0
- march_agent/cli.py +139 -0
- march_agent/conversation.py +103 -0
- march_agent/conversation_client.py +86 -0
- march_agent/conversation_message.py +48 -0
- march_agent/exceptions.py +36 -0
- march_agent/extensions/__init__.py +1 -0
- march_agent/extensions/langgraph.py +526 -0
- march_agent/extensions/pydantic_ai.py +180 -0
- march_agent/gateway_client.py +506 -0
- march_agent/gateway_pb2.py +73 -0
- march_agent/gateway_pb2_grpc.py +101 -0
- march_agent/heartbeat.py +84 -0
- march_agent/memory.py +73 -0
- march_agent/memory_client.py +155 -0
- march_agent/message.py +80 -0
- march_agent/streamer.py +220 -0
- march_agent-0.1.1.dist-info/METADATA +503 -0
- march_agent-0.1.1.dist-info/RECORD +29 -0
- march_agent-0.1.1.dist-info/WHEEL +5 -0
- march_agent-0.1.1.dist-info/entry_points.txt +2 -0
- march_agent-0.1.1.dist-info/licenses/LICENSE +21 -0
- march_agent-0.1.1.dist-info/top_level.txt +1 -0
march_agent/heartbeat.py
ADDED
@@ -0,0 +1,84 @@
"""Heartbeat manager for keeping agent status active."""

import logging
import time
import threading
from typing import Optional, TYPE_CHECKING

from .exceptions import HeartbeatError

if TYPE_CHECKING:
    from .gateway_client import GatewayClient

logger = logging.getLogger(__name__)


class HeartbeatManager:
    """Manages periodic heartbeat signals to the API via the gateway proxy."""

    def __init__(
        self,
        gateway_client: "GatewayClient",
        agent_name: str,
        interval: int = 60
    ):
        self.gateway_client = gateway_client
        self.agent_name = agent_name
        self.interval = interval
        self._running = False
        self._thread: Optional[threading.Thread] = None

    def start(self):
        """Start the heartbeat thread."""
        if self._running:
            raise HeartbeatError("Heartbeat already running")

        self._running = True
        self._thread = threading.Thread(target=self._heartbeat_loop, daemon=True)
        self._thread.start()
        logger.info(f"Heartbeat started for agent '{self.agent_name}'")

    def stop(self):
        """Stop the heartbeat thread."""
        if not self._running:
            return

        self._running = False
        if self._thread:
            self._thread.join(timeout=5.0)
        logger.info("Heartbeat stopped")

    def _heartbeat_loop(self):
        """Background loop that sends heartbeat periodically."""
        while self._running:
            try:
                self._send_heartbeat()
            except Exception as e:
                logger.error(f"Heartbeat send failed: {e}", exc_info=True)

            # Sleep in small intervals to allow quick shutdown
            for _ in range(self.interval):
                if not self._running:
                    break
                time.sleep(1)

    def _send_heartbeat(self):
        """Send a single heartbeat to the API via gateway proxy."""
        payload = {"name": self.agent_name}

        try:
            response = self.gateway_client.http_post(
                "ai-inventory",
                "/api/v1/health/heartbeat",
                json=payload,
                timeout=5.0
            )
            if response.status_code == 404:
                logger.warning(f"Agent '{self.agent_name}' not found. Re-registration may be needed.")
            elif response.status_code not in (200, 201):
                logger.warning(f"Heartbeat returned status {response.status_code}: {response.text}")
            else:
                logger.debug(f"Heartbeat sent successfully for agent '{self.agent_name}'")
        except Exception as e:
            logger.error(f"Heartbeat request failed: {e}")
            raise HeartbeatError(f"Failed to send heartbeat: {e}")
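
Note: a minimal usage sketch for HeartbeatManager. The GatewayClient construction is hypothetical (its constructor lives in march_agent/gateway_client.py, which this page lists but does not show); only http_post is taken from the code above.

import time

from march_agent.gateway_client import GatewayClient
from march_agent.heartbeat import HeartbeatManager

gateway = GatewayClient()  # hypothetical: constructor arguments not shown in this hunk
hb = HeartbeatManager(gateway, agent_name="demo-agent", interval=30)
hb.start()  # spawns a daemon thread; raises HeartbeatError if already running
try:
    time.sleep(120)  # the agent does its real work here
finally:
    hb.stop()  # clears the running flag and joins the thread (5 s timeout)
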
march_agent/memory.py
ADDED
@@ -0,0 +1,73 @@
"""Memory context for accessing AI memory from message handler."""

from typing import List, Optional

from .memory_client import MemoryClient, MemorySearchResult, UserSummary


class Memory:
    """User-facing memory context, attached to Message.

    Provides easy access to memory functions scoped to the current user/conversation.
    """

    def __init__(self, user_id: str, conversation_id: str, client: MemoryClient):
        self.user_id = user_id
        self.conversation_id = conversation_id
        self._client = client

    async def query_about_user(
        self,
        query: str,
        limit: int = 10,
        threshold: int = 70,
        context_messages: int = 0,
    ) -> List[MemorySearchResult]:
        """Query user's long-term memory with semantic search.

        Args:
            query: Semantic search query
            limit: Maximum results (1-100)
            threshold: Similarity threshold 0-100
            context_messages: Surrounding messages to include (0-20)

        Returns:
            List of MemorySearchResult
        """
        return await self._client.search(
            query=query,
            user_id=self.user_id,
            limit=limit,
            min_similarity=threshold,
            context_messages=context_messages,
        )

    async def query_about_conversation(
        self,
        query: str,
        limit: int = 10,
        threshold: int = 70,
        context_messages: int = 0,
    ) -> List[MemorySearchResult]:
        """Query conversation's long-term memory with semantic search.

        Args:
            query: Semantic search query
            limit: Maximum results (1-100)
            threshold: Similarity threshold 0-100
            context_messages: Surrounding messages to include (0-20)

        Returns:
            List of MemorySearchResult
        """
        return await self._client.search(
            query=query,
            conversation_id=self.conversation_id,
            limit=limit,
            min_similarity=threshold,
            context_messages=context_messages,
        )

    async def get_user_summary(self) -> Optional[UserSummary]:
        """Get user's summary (aggregated across all conversations)."""
        return await self._client.get_user_summary(self.user_id)
march_agent/memory_client.py
ADDED
@@ -0,0 +1,155 @@
"""HTTP client for AI Memory service."""

import logging
from dataclasses import dataclass, field
from typing import Dict, Any, Optional, List
from urllib.parse import urlencode
import aiohttp

from .exceptions import APIException

logger = logging.getLogger(__name__)


@dataclass
class MemoryMessage:
    """A message from ai-memory storage.

    Exactly matches the ai-memory API MessageStored schema.
    """

    id: str
    role: str  # "user", "assistant", "system"
    content: str
    tenant_id: Optional[str] = None  # Agent scope/namespace
    user_id: Optional[str] = None
    conversation_id: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None
    timestamp: Optional[str] = None
    sequence_number: Optional[int] = None

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "MemoryMessage":
        """Create from ai-memory API response."""
        return cls(
            id=data.get("id", ""),
            role=data.get("role", ""),
            content=data.get("content", ""),
            tenant_id=data.get("tenant_id"),
            user_id=data.get("user_id"),
            conversation_id=data.get("conversation_id"),
            metadata=data.get("metadata"),
            timestamp=data.get("timestamp"),
            sequence_number=data.get("sequence_number"),
        )


@dataclass
class MemorySearchResult:
    """A memory search result with similarity score."""

    message: MemoryMessage
    score: float
    context: List[MemoryMessage] = field(default_factory=list)


@dataclass
class UserSummary:
    """User conversation summary."""

    text: str
    last_updated: str
    message_count: int
    version: int


class MemoryClient:
    """Low-level async HTTP client for AI Memory service."""

    def __init__(self, base_url: str):
        self.base_url = base_url.rstrip("/")
        self._session: Optional[aiohttp.ClientSession] = None

    async def _get_session(self) -> aiohttp.ClientSession:
        if self._session is None or self._session.closed:
            timeout = aiohttp.ClientTimeout(total=30.0)
            self._session = aiohttp.ClientSession(timeout=timeout)
        return self._session

    async def close(self):
        if self._session and not self._session.closed:
            await self._session.close()

    async def search(
        self,
        query: str,
        user_id: Optional[str] = None,
        conversation_id: Optional[str] = None,
        tenant_id: Optional[str] = None,
        limit: int = 10,
        min_similarity: int = 70,
        context_messages: int = 0,
    ) -> List[MemorySearchResult]:
        """Search long-term memory with semantic search."""
        params = {
            "q": query,
            "limit": limit,
            "min_similarity": min_similarity,
            "context_messages": context_messages,
        }
        if user_id:
            params["user_id"] = user_id
        if conversation_id:
            params["conversation_id"] = conversation_id
        if tenant_id:
            params["tenant_id"] = tenant_id

        url = f"{self.base_url}/conversation/search?{urlencode(params)}"
        session = await self._get_session()
        try:
            async with session.get(url) as response:
                if response.status >= 400:
                    error_text = await response.text()
                    raise APIException(
                        f"Memory search failed: {response.status} - {error_text}"
                    )
                data = await response.json()

                results = []
                for item in data.get("results", []):
                    msg = MemoryMessage.from_dict(item["message"])
                    context = [MemoryMessage.from_dict(c) for c in item.get("context", [])]
                    results.append(
                        MemorySearchResult(message=msg, score=item["score"], context=context)
                    )
                return results
        except aiohttp.ClientError as e:
            raise APIException(f"Memory search failed: {e}")

    async def get_user_summary(self, user_id: str) -> Optional[UserSummary]:
        """Get user's conversation summary."""
        url = f"{self.base_url}/conversation/user/{user_id}/summary"
        session = await self._get_session()
        try:
            async with session.get(url) as response:
                if response.status == 404:
                    return None
                if response.status >= 400:
                    error_text = await response.text()
                    raise APIException(
                        f"Summary fetch failed: {response.status} - {error_text}"
                    )
                data = await response.json()

                if not data.get("has_summary") or not data.get("summary"):
                    return None

                s = data["summary"]
                return UserSummary(
                    text=s["text"],
                    last_updated=s["last_updated"],
                    message_count=s["message_count"],
                    version=s.get("version", 1),
                )
        except aiohttp.ClientError as e:
            raise APIException(f"Summary fetch failed: {e}")
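
Note: used standalone, the client looks roughly like this. The base URL is a placeholder; the endpoint paths and parameter names come from the code above.

import asyncio

from march_agent.memory_client import MemoryClient

async def main():
    client = MemoryClient("http://ai-memory.local")  # placeholder URL
    try:
        results = await client.search("favorite color", user_id="u-123", min_similarity=60)
        for r in results:
            print(f"{r.score:.1f}  {r.message.role}: {r.message.content}")
    finally:
        await client.close()  # closes the shared aiohttp session

asyncio.run(main())
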
march_agent/message.py
ADDED
@@ -0,0 +1,80 @@
"""Message data structure."""

import json
from dataclasses import dataclass
from typing import Dict, Any, Optional, TYPE_CHECKING

from .conversation import Conversation
from .conversation_client import ConversationClient

if TYPE_CHECKING:
    from .memory import Memory
    from .memory_client import MemoryClient


@dataclass
class Message:
    """Represents an incoming message."""

    content: str
    conversation_id: str
    user_id: str
    headers: Dict[str, str]
    raw_body: Dict[str, Any]
    conversation: Optional[Conversation] = None
    memory: Optional["Memory"] = None
    metadata: Optional[Dict[str, Any]] = None
    schema: Optional[Dict[str, Any]] = None

    @classmethod
    def from_kafka_message(
        cls,
        body: Dict[str, Any],
        headers: Dict[str, str],
        conversation_client: Optional[ConversationClient] = None,
        memory_client: Optional["MemoryClient"] = None,
        agent_name: Optional[str] = None,
    ):
        """Create Message from Kafka message."""
        from .memory import Memory

        conversation_id = headers.get("conversationId")
        user_id = headers.get("userId", "anonymous")

        # Parse metadata from header
        metadata = None
        metadata_header = headers.get("messageMetadata")
        if metadata_header:
            try:
                metadata = json.loads(metadata_header)
            except json.JSONDecodeError:
                pass

        # Parse schema from header
        schema = None
        schema_header = headers.get("messageSchema")
        if schema_header:
            try:
                schema = json.loads(schema_header)
            except json.JSONDecodeError:
                pass

        conversation = None
        if conversation_id and conversation_client:
            conversation = Conversation(conversation_id, conversation_client, agent_name)

        memory = None
        if memory_client and user_id and conversation_id:
            memory = Memory(user_id, conversation_id, memory_client)

        return cls(
            content=body.get("content", ""),
            conversation_id=conversation_id,
            user_id=user_id,
            headers=headers,
            raw_body=body,
            conversation=conversation,
            memory=memory,
            metadata=metadata,
            schema=schema,
        )
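
Note: a sketch of the parsing path with made-up header values. No clients are passed, so conversation and memory stay None, as the code above dictates.

from march_agent.message import Message

msg = Message.from_kafka_message(
    body={"content": "hello"},
    headers={
        "conversationId": "c-1",
        "userId": "u-1",
        "messageMetadata": '{"source": "web"}',  # JSON string, parsed into msg.metadata
    },
)
assert msg.content == "hello"
assert msg.metadata == {"source": "web"}
assert msg.conversation is None and msg.memory is None  # no clients supplied
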
march_agent/streamer.py
ADDED
@@ -0,0 +1,220 @@
"""Streamer class for streaming responses with context manager support."""

import json
import logging
from typing import Optional, Dict, Any, List, TYPE_CHECKING

from .artifact import Artifact, ArtifactType

if TYPE_CHECKING:
    from .gateway_client import GatewayClient
    from .message import Message
    from .conversation_client import ConversationClient

logger = logging.getLogger(__name__)


class Streamer:
    """Handles streaming responses back to the conversation via the gateway (async)."""

    def __init__(
        self,
        agent_name: str,
        original_message: "Message",
        gateway_client: "GatewayClient",
        conversation_client: Optional["ConversationClient"] = None,
        awaiting: bool = False,
        send_to: str = "user",
    ):
        self._agent_name = agent_name
        self._original_message = original_message
        self._gateway_client = gateway_client
        self._conversation_client = conversation_client
        self._awaiting = awaiting  # Used by context manager
        self._send_to = send_to
        self._finished = False
        self._response_schema: Optional[Dict[str, Any]] = None
        self._message_metadata: Optional[Dict[str, Any]] = None
        self._artifacts: List[Artifact] = []
        self._artifacts_sent: bool = False

    def set_response_schema(self, schema: Dict[str, Any]) -> "Streamer":
        """Set response schema (fluent API)."""
        self._response_schema = schema
        return self

    def set_message_metadata(self, metadata: Dict[str, Any]) -> "Streamer":
        """Set message metadata (fluent API)."""
        self._message_metadata = metadata
        return self

    def add_artifact(
        self,
        url: str,
        type: str,
        title: Optional[str] = None,
        description: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> "Streamer":
        """Add an artifact to the message (fluent API).

        Args:
            url: URL to the artifact (file or iframe src)
            type: Type of artifact ('document', 'image', 'iframe', 'video', 'audio', 'code', 'link', 'file')
            title: Optional display title
            description: Optional description or alt text
            metadata: Optional metadata dict (size, mimeType, dimensions, etc.)

        Returns:
            self for method chaining

        Example:
            streamer.add_artifact(
                url="https://example.com/report.pdf",
                type="document",
                title="Monthly Report",
                metadata={"size": 1024000, "mimeType": "application/pdf"}
            )
        """
        artifact = Artifact(
            url=url,
            type=ArtifactType(type),
            title=title,
            description=description,
            metadata=metadata,
            position=len(self._artifacts),
        )
        self._artifacts.append(artifact)
        return self

    def set_artifacts(self, artifacts: List[Dict[str, Any]]) -> "Streamer":
        """Set all artifacts at once (replaces any existing).

        Args:
            artifacts: List of artifact dicts with url, type, title, description, metadata

        Returns:
            self for method chaining
        """
        self._artifacts = [
            Artifact(
                url=a["url"],
                type=ArtifactType(a["type"]),
                title=a.get("title"),
                description=a.get("description"),
                metadata=a.get("metadata"),
                position=i,
            )
            for i, a in enumerate(artifacts)
        ]
        return self

    def stream(
        self, content: str, persist: bool = True, event_type: Optional[str] = None
    ) -> None:
        """Stream a content chunk (not done).

        Args:
            content: The content to stream
            persist: If True, this content will be saved to DB. If False, only streamed.
            event_type: Optional event type for the chunk (e.g., 'thinking', 'tool_call').
        """
        if self._finished:
            raise RuntimeError("Streamer already finished")
        self._send(content, done=False, persist=persist, event_type=event_type)

    def write(self, content: str, persist: bool = True) -> None:
        """Alias for stream() - write a content chunk.

        Args:
            content: The content to write
            persist: If True, this content will be saved to DB. If False, only streamed.
        """
        self.stream(content, persist=persist)

    async def finish(self, awaiting: Optional[bool] = None) -> None:
        """Finish streaming with empty done=True chunk.

        Args:
            awaiting: If True, sets awaiting_route to this agent's name.
                If None, uses the awaiting value from constructor.
        """
        if self._finished:
            return  # Idempotent

        self._send("", done=True, persist=True)
        self._finished = True

        # Use constructor's awaiting if not explicitly provided
        should_await = awaiting if awaiting is not None else self._awaiting

        # Store pending_response_schema if schema was set
        if self._response_schema and self._conversation_client:
            await self._set_pending_response_schema()

        if should_await and self._conversation_client:
            await self._set_awaiting_route()

    def _send(
        self,
        content: str,
        done: bool,
        persist: bool = True,
        event_type: Optional[str] = None,
    ) -> None:
        """Send message to router via gateway (sync - uses gRPC)."""
        message_body = {"content": content, "done": done, "persist": persist}
        if event_type:
            message_body["eventType"] = event_type
        headers = {
            "conversationId": self._original_message.conversation_id,
            "userId": self._original_message.user_id,
            "from_": self._agent_name,
            "to_": self._send_to,
            "nextRoute": self._send_to,
        }
        if self._response_schema:
            headers["responseSchema"] = json.dumps(self._response_schema)
        if self._message_metadata:
            headers["messageMetadata"] = json.dumps(self._message_metadata)

        # Send artifacts with first chunk (only once)
        if self._artifacts and not self._artifacts_sent:
            headers["artifacts"] = json.dumps([a.to_dict() for a in self._artifacts])
            self._artifacts_sent = True

        # Produce via gateway gRPC (synchronous operation)
        self._gateway_client.produce(
            topic="router.inbox",
            key=self._original_message.conversation_id,
            headers=headers,
            body=message_body,
        )

    async def _set_pending_response_schema(self) -> None:
        """Store response schema on conversation for form validation."""
        try:
            await self._conversation_client.update_conversation(
                self._original_message.conversation_id,
                {"pending_response_schema": self._response_schema},
            )
        except Exception as e:
            logger.error(f"Failed to set pending_response_schema: {e}")

    async def _set_awaiting_route(self) -> None:
        """Set awaiting_route to this agent's name."""
        try:
            await self._conversation_client.update_conversation(
                self._original_message.conversation_id,
                {"awaiting_route": self._agent_name},
            )
        except Exception as e:
            logger.error(f"Failed to set awaiting_route: {e}")

    async def __aenter__(self) -> "Streamer":
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
        if not self._finished:
            await self.finish(awaiting=self._awaiting)
        return False  # Don't suppress exceptions
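
Note: the intended shape appears to be the async context manager, which calls finish() on exit. The sketch below assumes the handler already holds a Message and a connected GatewayClient; the artifact URL is an example value.

from march_agent.streamer import Streamer

async def reply(message, gateway):
    async with Streamer("demo-agent", message, gateway) as s:
        s.add_artifact(  # artifacts ride along with the first chunk
            url="https://example.com/report.pdf",
            type="document",
            title="Monthly Report",
        )
        s.stream("Working on it...", event_type="thinking", persist=False)
        s.write("Here is your report.")
    # __aexit__ sent the empty done=True chunk to router.inbox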