basion_agent-0.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. basion_agent/__init__.py +62 -0
  2. basion_agent/agent.py +360 -0
  3. basion_agent/agent_state_client.py +149 -0
  4. basion_agent/app.py +502 -0
  5. basion_agent/artifact.py +58 -0
  6. basion_agent/attachment_client.py +153 -0
  7. basion_agent/checkpoint_client.py +169 -0
  8. basion_agent/checkpointer.py +16 -0
  9. basion_agent/cli.py +139 -0
  10. basion_agent/conversation.py +103 -0
  11. basion_agent/conversation_client.py +86 -0
  12. basion_agent/conversation_message.py +48 -0
  13. basion_agent/exceptions.py +36 -0
  14. basion_agent/extensions/__init__.py +1 -0
  15. basion_agent/extensions/langgraph.py +526 -0
  16. basion_agent/extensions/pydantic_ai.py +180 -0
  17. basion_agent/gateway_client.py +531 -0
  18. basion_agent/gateway_pb2.py +73 -0
  19. basion_agent/gateway_pb2_grpc.py +101 -0
  20. basion_agent/heartbeat.py +84 -0
  21. basion_agent/loki_handler.py +355 -0
  22. basion_agent/memory.py +73 -0
  23. basion_agent/memory_client.py +155 -0
  24. basion_agent/message.py +333 -0
  25. basion_agent/py.typed +0 -0
  26. basion_agent/streamer.py +184 -0
  27. basion_agent/structural/__init__.py +6 -0
  28. basion_agent/structural/artifact.py +94 -0
  29. basion_agent/structural/base.py +71 -0
  30. basion_agent/structural/stepper.py +125 -0
  31. basion_agent/structural/surface.py +90 -0
  32. basion_agent/structural/text_block.py +96 -0
  33. basion_agent/tools/__init__.py +19 -0
  34. basion_agent/tools/container.py +46 -0
  35. basion_agent/tools/knowledge_graph.py +306 -0
  36. basion_agent-0.4.0.dist-info/METADATA +880 -0
  37. basion_agent-0.4.0.dist-info/RECORD +41 -0
  38. basion_agent-0.4.0.dist-info/WHEEL +5 -0
  39. basion_agent-0.4.0.dist-info/entry_points.txt +2 -0
  40. basion_agent-0.4.0.dist-info/licenses/LICENSE +21 -0
  41. basion_agent-0.4.0.dist-info/top_level.txt +1 -0
basion_agent/gateway_pb2.py ADDED
@@ -0,0 +1,73 @@
+ # -*- coding: utf-8 -*-
+ # Generated by the protocol buffer compiler. DO NOT EDIT!
+ # NO CHECKED-IN PROTOBUF GENCODE
+ # source: gateway.proto
+ # Protobuf Python Version: 6.31.1
+ """Generated protocol buffer code."""
+ from google.protobuf import descriptor as _descriptor
+ from google.protobuf import descriptor_pool as _descriptor_pool
+ from google.protobuf import runtime_version as _runtime_version
+ from google.protobuf import symbol_database as _symbol_database
+ from google.protobuf.internal import builder as _builder
+ _runtime_version.ValidateProtobufRuntimeVersion(
+     _runtime_version.Domain.PUBLIC,
+     6,
+     31,
+     1,
+     '',
+     'gateway.proto'
+ )
+ # @@protoc_insertion_point(imports)
+
+ _sym_db = _symbol_database.Default()
+
+
+
+
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rgateway.proto\x12\x07gateway\"\xf6\x01\n\rClientMessage\x12$\n\x04\x61uth\x18\x01 \x01(\x0b\x32\x14.gateway.AuthRequestH\x00\x12.\n\tsubscribe\x18\x02 \x01(\x0b\x32\x19.gateway.SubscribeRequestH\x00\x12\x32\n\x0bunsubscribe\x18\x03 \x01(\x0b\x32\x1b.gateway.UnsubscribeRequestH\x00\x12*\n\x07produce\x18\x04 \x01(\x0b\x32\x17.gateway.ProduceRequestH\x00\x12$\n\x04ping\x18\x05 \x01(\x0b\x32\x14.gateway.PingRequestH\x00\x42\t\n\x07payload\"\xd4\x02\n\rServerMessage\x12.\n\rauth_response\x18\x01 \x01(\x0b\x32\x15.gateway.AuthResponseH\x00\x12(\n\x07message\x18\x02 \x01(\x0b\x32\x15.gateway.KafkaMessageH\x00\x12*\n\x0bproduce_ack\x18\x03 \x01(\x0b\x32\x13.gateway.ProduceAckH\x00\x12.\n\rsubscribe_ack\x18\x04 \x01(\x0b\x32\x15.gateway.SubscribeAckH\x00\x12\x32\n\x0funsubscribe_ack\x18\x05 \x01(\x0b\x32\x17.gateway.UnsubscribeAckH\x00\x12%\n\x04pong\x18\x06 \x01(\x0b\x32\x15.gateway.PongResponseH\x00\x12\'\n\x05\x65rror\x18\x07 \x01(\x0b\x32\x16.gateway.ErrorResponseH\x00\x42\t\n\x07payload\"3\n\x0b\x41uthRequest\x12\x0f\n\x07\x61pi_key\x18\x01 \x01(\t\x12\x13\n\x0b\x61gent_names\x18\x02 \x03(\t\"@\n\x0c\x41uthResponse\x12\x15\n\rconnection_id\x18\x01 \x01(\t\x12\x19\n\x11subscribed_topics\x18\x02 \x03(\t\"&\n\x10SubscribeRequest\x12\x12\n\nagent_name\x18\x01 \x01(\t\"\x1d\n\x0cSubscribeAck\x12\r\n\x05topic\x18\x01 \x01(\t\"(\n\x12UnsubscribeRequest\x12\x12\n\nagent_name\x18\x01 \x01(\t\"$\n\x0eUnsubscribeAck\x12\x12\n\nagent_name\x18\x01 \x01(\t\"\xb9\x01\n\x0eProduceRequest\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x35\n\x07headers\x18\x03 \x03(\x0b\x32$.gateway.ProduceRequest.HeadersEntry\x12\x0c\n\x04\x62ody\x18\x04 \x01(\x0c\x12\x16\n\x0e\x63orrelation_id\x18\x05 \x01(\t\x1a.\n\x0cHeadersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"V\n\nProduceAck\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x11\n\tpartition\x18\x02 \x01(\x05\x12\x0e\n\x06offset\x18\x03 \x01(\x03\x12\x16\n\x0e\x63orrelation_id\x18\x04 \x01(\t\"\xd3\x01\n\x0cKafkaMessage\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x11\n\tpartition\x18\x02 \x01(\x05\x12\x0e\n\x06offset\x18\x03 \x01(\x03\x12\x0b\n\x03key\x18\x04 \x01(\t\x12\x33\n\x07headers\x18\x05 \x03(\x0b\x32\".gateway.KafkaMessage.HeadersEntry\x12\x0c\n\x04\x62ody\x18\x06 \x01(\x0c\x12\x11\n\ttimestamp\x18\x07 \x01(\x03\x1a.\n\x0cHeadersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\" \n\x0bPingRequest\x12\x11\n\ttimestamp\x18\x01 \x01(\x03\"B\n\x0cPongResponse\x12\x18\n\x10\x63lient_timestamp\x18\x01 \x01(\x03\x12\x18\n\x10server_timestamp\x18\x02 \x01(\x03\"F\n\rErrorResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x16\n\x0e\x63orrelation_id\x18\x03 \x01(\t2Q\n\x0c\x41gentGateway\x12\x41\n\x0b\x41gentStream\x12\x16.gateway.ClientMessage\x1a\x16.gateway.ServerMessage(\x01\x30\x01\x42 Z\x1e\x61gent-gateway/internal/grpc/pbb\x06proto3')
+
+ _globals = globals()
+ _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+ _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'gateway_pb2', _globals)
+ if not _descriptor._USE_C_DESCRIPTORS:
+   _globals['DESCRIPTOR']._loaded_options = None
+   _globals['DESCRIPTOR']._serialized_options = b'Z\036agent-gateway/internal/grpc/pb'
+   _globals['_PRODUCEREQUEST_HEADERSENTRY']._loaded_options = None
+   _globals['_PRODUCEREQUEST_HEADERSENTRY']._serialized_options = b'8\001'
+   _globals['_KAFKAMESSAGE_HEADERSENTRY']._loaded_options = None
+   _globals['_KAFKAMESSAGE_HEADERSENTRY']._serialized_options = b'8\001'
+   _globals['_CLIENTMESSAGE']._serialized_start=27
+   _globals['_CLIENTMESSAGE']._serialized_end=273
+   _globals['_SERVERMESSAGE']._serialized_start=276
+   _globals['_SERVERMESSAGE']._serialized_end=616
+   _globals['_AUTHREQUEST']._serialized_start=618
+   _globals['_AUTHREQUEST']._serialized_end=669
+   _globals['_AUTHRESPONSE']._serialized_start=671
+   _globals['_AUTHRESPONSE']._serialized_end=735
+   _globals['_SUBSCRIBEREQUEST']._serialized_start=737
+   _globals['_SUBSCRIBEREQUEST']._serialized_end=775
+   _globals['_SUBSCRIBEACK']._serialized_start=777
+   _globals['_SUBSCRIBEACK']._serialized_end=806
+   _globals['_UNSUBSCRIBEREQUEST']._serialized_start=808
+   _globals['_UNSUBSCRIBEREQUEST']._serialized_end=848
+   _globals['_UNSUBSCRIBEACK']._serialized_start=850
+   _globals['_UNSUBSCRIBEACK']._serialized_end=886
+   _globals['_PRODUCEREQUEST']._serialized_start=889
+   _globals['_PRODUCEREQUEST']._serialized_end=1074
+   _globals['_PRODUCEREQUEST_HEADERSENTRY']._serialized_start=1028
+   _globals['_PRODUCEREQUEST_HEADERSENTRY']._serialized_end=1074
+   _globals['_PRODUCEACK']._serialized_start=1076
+   _globals['_PRODUCEACK']._serialized_end=1162
+   _globals['_KAFKAMESSAGE']._serialized_start=1165
+   _globals['_KAFKAMESSAGE']._serialized_end=1376
+   _globals['_KAFKAMESSAGE_HEADERSENTRY']._serialized_start=1028
+   _globals['_KAFKAMESSAGE_HEADERSENTRY']._serialized_end=1074
+   _globals['_PINGREQUEST']._serialized_start=1378
+   _globals['_PINGREQUEST']._serialized_end=1410
+   _globals['_PONGRESPONSE']._serialized_start=1412
+   _globals['_PONGRESPONSE']._serialized_end=1478
+   _globals['_ERRORRESPONSE']._serialized_start=1480
+   _globals['_ERRORRESPONSE']._serialized_end=1550
+   _globals['_AGENTGATEWAY']._serialized_start=1552
+   _globals['_AGENTGATEWAY']._serialized_end=1633
+ # @@protoc_insertion_point(module_scope)
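
Editor's note: the serialized descriptor above encodes the full gateway schema, namely ClientMessage and ServerMessage envelopes with a "payload" oneof, plus auth, subscribe, produce, ping, and Kafka-message types. A minimal sketch of driving the generated classes follows; the topic, key, header, and body values are placeholders, not package defaults.

    # Sketch: building a ClientMessage envelope from the generated classes.
    # Topic/key/header values below are illustrative assumptions.
    from basion_agent import gateway_pb2

    msg = gateway_pb2.ClientMessage(
        produce=gateway_pb2.ProduceRequest(
            topic="agent.events",
            key="conversation-123",
            headers={"trace-id": "abc"},   # map<string, string> per the schema
            body=b'{"hello": "world"}',
            correlation_id="req-1",
        )
    )
    data = msg.SerializeToString()
    decoded = gateway_pb2.ClientMessage.FromString(data)
    assert decoded.WhichOneof("payload") == "produce"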
basion_agent/gateway_pb2_grpc.py ADDED
@@ -0,0 +1,101 @@
+ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+ """Client and server classes corresponding to protobuf-defined services."""
+ import grpc
+ import warnings
+
+ from . import gateway_pb2 as gateway__pb2
+
+ GRPC_GENERATED_VERSION = '1.76.0'
+ GRPC_VERSION = grpc.__version__
+ _version_not_supported = False
+
+ try:
+     from grpc._utilities import first_version_is_lower
+     _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+ except ImportError:
+     _version_not_supported = True
+
+ if _version_not_supported:
+     raise RuntimeError(
+         f'The grpc package installed is at version {GRPC_VERSION},'
+         + ' but the generated code in gateway_pb2_grpc.py depends on'
+         + f' grpcio>={GRPC_GENERATED_VERSION}.'
+         + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+         + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+     )
+
+
+ class AgentGatewayStub(object):
+     """Bidirectional streaming service for agent communication
+     """
+
+     def __init__(self, channel):
+         """Constructor.
+
+         Args:
+             channel: A grpc.Channel.
+         """
+         self.AgentStream = channel.stream_stream(
+                 '/gateway.AgentGateway/AgentStream',
+                 request_serializer=gateway__pb2.ClientMessage.SerializeToString,
+                 response_deserializer=gateway__pb2.ServerMessage.FromString,
+                 _registered_method=True)
+
+
+ class AgentGatewayServicer(object):
+     """Bidirectional streaming service for agent communication
+     """
+
+     def AgentStream(self, request_iterator, context):
+         """Main bidirectional stream for all agent communication
+         """
+         context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+         context.set_details('Method not implemented!')
+         raise NotImplementedError('Method not implemented!')
+
+
+ def add_AgentGatewayServicer_to_server(servicer, server):
+     rpc_method_handlers = {
+             'AgentStream': grpc.stream_stream_rpc_method_handler(
+                     servicer.AgentStream,
+                     request_deserializer=gateway__pb2.ClientMessage.FromString,
+                     response_serializer=gateway__pb2.ServerMessage.SerializeToString,
+             ),
+     }
+     generic_handler = grpc.method_handlers_generic_handler(
+             'gateway.AgentGateway', rpc_method_handlers)
+     server.add_generic_rpc_handlers((generic_handler,))
+     server.add_registered_method_handlers('gateway.AgentGateway', rpc_method_handlers)
+
+
+ # This class is part of an EXPERIMENTAL API.
+ class AgentGateway(object):
+     """Bidirectional streaming service for agent communication
+     """
+
+     @staticmethod
+     def AgentStream(request_iterator,
+             target,
+             options=(),
+             channel_credentials=None,
+             call_credentials=None,
+             insecure=False,
+             compression=None,
+             wait_for_ready=None,
+             timeout=None,
+             metadata=None):
+         return grpc.experimental.stream_stream(
+             request_iterator,
+             target,
+             '/gateway.AgentGateway/AgentStream',
+             gateway__pb2.ClientMessage.SerializeToString,
+             gateway__pb2.ServerMessage.FromString,
+             options,
+             channel_credentials,
+             insecure,
+             call_credentials,
+             compression,
+             wait_for_ready,
+             timeout,
+             metadata,
+             _registered_method=True)
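
Editor's note: for completeness, a client-side sketch of the bidirectional AgentStream RPC defined above. The address and API key are placeholders; in practice the package's gateway_client.py (not reproduced in this diff) appears to be the intended entry point.

    # Sketch: opening the AgentStream bidirectional RPC with the generated stub.
    # "localhost:50051" and "YOUR_KEY" are assumptions, not package defaults.
    import grpc
    from basion_agent import gateway_pb2, gateway_pb2_grpc

    def requests_iter():
        # First frame authenticates; later frames could subscribe or produce.
        yield gateway_pb2.ClientMessage(
            auth=gateway_pb2.AuthRequest(api_key="YOUR_KEY", agent_names=["my-agent"])
        )

    with grpc.insecure_channel("localhost:50051") as channel:
        stub = gateway_pb2_grpc.AgentGatewayStub(channel)
        for server_msg in stub.AgentStream(requests_iter()):
            if server_msg.WhichOneof("payload") == "auth_response":
                print("connection id:", server_msg.auth_response.connection_id)
                break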
basion_agent/heartbeat.py ADDED
@@ -0,0 +1,84 @@
+ """Heartbeat manager for keeping agent status active."""
+
+ import logging
+ import time
+ import threading
+ from typing import Optional, TYPE_CHECKING
+
+ from .exceptions import HeartbeatError
+
+ if TYPE_CHECKING:
+     from .gateway_client import GatewayClient
+
+ logger = logging.getLogger(__name__)
+
+
+ class HeartbeatManager:
+     """Manages periodic heartbeat signals to the API via the gateway proxy."""
+
+     def __init__(
+         self,
+         gateway_client: "GatewayClient",
+         agent_name: str,
+         interval: int = 60
+     ):
+         self.gateway_client = gateway_client
+         self.agent_name = agent_name
+         self.interval = interval
+         self._running = False
+         self._thread: Optional[threading.Thread] = None
+
+     def start(self):
+         """Start the heartbeat thread."""
+         if self._running:
+             raise HeartbeatError("Heartbeat already running")
+
+         self._running = True
+         self._thread = threading.Thread(target=self._heartbeat_loop, daemon=True)
+         self._thread.start()
+         logger.info(f"Heartbeat started for agent '{self.agent_name}'")
+
+     def stop(self):
+         """Stop the heartbeat thread."""
+         if not self._running:
+             return
+
+         self._running = False
+         if self._thread:
+             self._thread.join(timeout=5.0)
+         logger.info("Heartbeat stopped")
+
+     def _heartbeat_loop(self):
+         """Background loop that sends heartbeat periodically."""
+         while self._running:
+             try:
+                 self._send_heartbeat()
+             except Exception as e:
+                 logger.error(f"Heartbeat send failed: {e}", exc_info=True)
+
+             # Sleep in small intervals to allow quick shutdown
+             for _ in range(self.interval):
+                 if not self._running:
+                     break
+                 time.sleep(1)
+
+     def _send_heartbeat(self):
+         """Send a single heartbeat to the API via gateway proxy."""
+         payload = {"name": self.agent_name}
+
+         try:
+             response = self.gateway_client.http_post(
+                 "ai-inventory",
+                 "/api/v1/health/heartbeat",
+                 json=payload,
+                 timeout=5.0
+             )
+             if response.status_code == 404:
+                 logger.warning(f"Agent '{self.agent_name}' not found. Re-registration may be needed.")
+             elif response.status_code not in (200, 201):
+                 logger.warning(f"Heartbeat returned status {response.status_code}: {response.text}")
+             else:
+                 logger.debug(f"Heartbeat sent successfully for agent '{self.agent_name}'")
+         except Exception as e:
+             logger.error(f"Heartbeat request failed: {e}")
+             raise HeartbeatError(f"Failed to send heartbeat: {e}")
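
Editor's note: a short usage sketch for HeartbeatManager. GatewayClient construction lives in gateway_client.py and is not shown in this diff, so client below stands in for a configured instance, and run_agent is a hypothetical placeholder.

    # Sketch: running a heartbeat alongside an agent.
    # `client` is assumed to be a configured GatewayClient exposing http_post().
    from basion_agent.heartbeat import HeartbeatManager

    hb = HeartbeatManager(gateway_client=client, agent_name="my-agent", interval=60)
    hb.start()        # daemon thread; raises HeartbeatError if already running
    try:
        run_agent()   # placeholder for the agent's main loop
    finally:
        hb.stop()     # signals the loop and joins the thread (5s timeout)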
basion_agent/loki_handler.py ADDED
@@ -0,0 +1,355 @@
+ """
+ Loki log handler for remote log collection.
+
+ This module provides a Python logging.Handler that batches logs
+ and sends them to a centralized Loki instance via the agent-gateway.
+ """
+
+ import atexit
+ import gzip
+ import json
+ import logging
+ import os
+ import queue
+ import threading
+ import time
+ from dataclasses import dataclass, field
+ from typing import Dict, List, Optional, Any, TYPE_CHECKING
+
+ import requests
+
+ if TYPE_CHECKING:
+     from .gateway_client import GatewayClient
+
+ logger = logging.getLogger(__name__)
+
+
+ @dataclass
+ class LogEntry:
+     """A single log entry to be sent to Loki."""
+     timestamp: int  # Unix timestamp in nanoseconds
+     level: str
+     message: str
+     extra: Dict[str, str] = field(default_factory=dict)
+
+
+ class LokiLogHandler(logging.Handler):
+     """
+     A logging handler that batches and sends logs to Loki via the agent-gateway.
+
+     Features:
+     - Batches logs to reduce HTTP overhead
+     - Background thread for non-blocking log sending
+     - Graceful degradation when Loki is unavailable
+     - Bounded buffer to prevent memory exhaustion
+
+     Example:
+         from basion_agent.loki_handler import LokiLogHandler
+
+         handler = LokiLogHandler(
+             gateway_client=gateway_client,
+             agent_name="my-agent",
+             batch_size=100,
+             flush_interval=5.0,
+         )
+
+         # Add to root logger or specific logger
+         logging.getLogger().addHandler(handler)
+     """
+
+     def __init__(
+         self,
+         gateway_client: "GatewayClient",
+         agent_name: str,
+         batch_size: int = 100,
+         flush_interval: float = 5.0,
+         max_buffer_size: int = 10000,
+         retry_max_delay: float = 30.0,
+         level: int = logging.DEBUG,
+         enable_compression: bool = True,
+     ):
+         """
+         Initialize the Loki log handler.
+
+         Args:
+             gateway_client: The GatewayClient for HTTP communication
+             agent_name: Name of the agent (used as a label in Loki)
+             batch_size: Number of logs to batch before sending (default: 100)
+             flush_interval: Maximum seconds between flushes (default: 5.0)
+             max_buffer_size: Maximum logs to buffer (default: 10000)
+             retry_max_delay: Maximum retry delay in seconds (default: 30.0)
+             level: Minimum log level to handle (default: DEBUG)
+             enable_compression: Use gzip compression for log batches (default: True)
+         """
+         super().__init__(level=level)
+
+         self.gateway_client = gateway_client
+         self.agent_name = agent_name
+         self.batch_size = batch_size
+         self.flush_interval = flush_interval
+         self.max_buffer_size = max_buffer_size
+         self.retry_max_delay = retry_max_delay
+         self.enable_compression = enable_compression
+
+         # Environment-based log level override for production
+         if os.getenv("ENVIRONMENT") == "production":
+             production_level = max(level, logging.INFO)
+             self.setLevel(production_level)
+             if production_level != level:
+                 logger.info(f"Production mode: Elevated log level from {level} to {production_level}")
+
+         # Thread-safe queue for log entries
+         self._queue: queue.Queue[LogEntry] = queue.Queue(maxsize=max_buffer_size)
+
+         # Background flush thread
+         self._running = False
+         self._thread: Optional[threading.Thread] = None
+         self._lock = threading.Lock()
+
+         # Retry state
+         self._retry_delay = 1.0
+         self._last_error_time: Optional[float] = None
+
+         # Start background thread
+         self._start_flush_thread()
+
+         # Register cleanup on exit
+         atexit.register(self.close)
+
+     def _start_flush_thread(self):
+         """Start the background flush thread."""
+         with self._lock:
+             if self._running:
+                 return
+
+             self._running = True
+             self._thread = threading.Thread(
+                 target=self._flush_loop,
+                 daemon=True,
+                 name=f"LokiLogHandler-{self.agent_name}"
+             )
+             self._thread.start()
+
+     def _flush_loop(self):
+         """Background loop that periodically flushes logs."""
+         last_flush = time.time()
+
+         while self._running:
+             try:
+                 # Calculate time until next flush
+                 elapsed = time.time() - last_flush
+                 wait_time = max(0, self.flush_interval - elapsed)
+
+                 # Try to collect logs up to batch_size or until timeout
+                 entries: List[LogEntry] = []
+                 deadline = time.time() + wait_time
+
+                 while len(entries) < self.batch_size:
+                     remaining = max(0, deadline - time.time())
+                     if remaining <= 0 and entries:
+                         break
+
+                     try:
+                         entry = self._queue.get(timeout=min(remaining, 0.5) if remaining > 0 else 0.1)
+                         entries.append(entry)
+                         self._queue.task_done()
+                     except queue.Empty:
+                         if time.time() >= deadline:
+                             break
+
+                 # Flush if we have entries
+                 if entries:
+                     self._flush_entries(entries)
+                     last_flush = time.time()
+
+             except Exception as e:
+                 # Log locally but don't crash
+                 logger.warning(f"Error in Loki flush loop: {e}")
+                 time.sleep(1.0)
+
+     def _flush_entries(self, entries: List[LogEntry]):
+         """Send a batch of log entries to Loki via the gateway."""
+         if not entries:
+             return
+
+         payload = {
+             "agent_name": self.agent_name,
+             "entries": [
+                 {
+                     "timestamp": e.timestamp,
+                     "level": e.level,
+                     "message": e.message,
+                     "extra": e.extra,
+                 }
+                 for e in entries
+             ]
+         }
+
+         try:
+             # Prepare headers
+             headers = {"X-API-Key": self.gateway_client.api_key}
+
+             # Prepare body (with optional compression)
+             json_payload = json.dumps(payload).encode('utf-8')
+             body = json_payload
+
+             if self.enable_compression:
+                 body = gzip.compress(json_payload)
+                 headers["Content-Encoding"] = "gzip"
+                 headers["Content-Type"] = "application/json"
+             else:
+                 headers["Content-Type"] = "application/json"
+
+             # Use gateway client's HTTP URL
+             response = requests.post(
+                 f"{self._get_http_url()}/api/v1/logs",
+                 data=body,
+                 headers=headers,
+                 timeout=10.0,
+             )
+
+             if response.status_code in (200, 204):
+                 # Success - reset retry delay
+                 self._retry_delay = 1.0
+                 self._last_error_time = None
+             else:
+                 raise Exception(f"Gateway returned status {response.status_code}")
+
+         except Exception as e:
+             # Log warning locally (but avoid recursion by using direct stderr)
+             current_time = time.time()
+             if self._last_error_time is None or current_time - self._last_error_time > 60:
+                 import sys
+                 print(
+                     f"[LokiLogHandler] Failed to send {len(entries)} logs: {e}. "
+                     f"Retry delay: {self._retry_delay}s",
+                     file=sys.stderr
+                 )
+                 self._last_error_time = current_time
+
+             # Exponential backoff
+             time.sleep(self._retry_delay)
+             self._retry_delay = min(self._retry_delay * 2, self.retry_max_delay)
+
+             # Re-queue entries that failed (if buffer has space)
+             for entry in entries:
+                 try:
+                     self._queue.put_nowait(entry)
+                 except queue.Full:
+                     break  # Buffer full, drop oldest logs
+
+     def _get_http_url(self) -> str:
+         """Get HTTP URL from gateway client (handles protocol conversion)."""
+         if hasattr(self.gateway_client, 'http_url'):
+             return self.gateway_client.http_url
+
+         # Fallback: convert gRPC URL to HTTP URL
+         gateway_url = self.gateway_client.gateway_url
+         return gateway_url.replace("grpc://", "http://").replace("grpcs://", "https://")
+
+     def is_healthy(self) -> bool:
+         """
+         Check if handler is functioning (not backlogged).
+
+         Returns:
+             True if handler is running and buffer is not >80% full
+         """
+         return (
+             self._running and
+             self._queue.qsize() < self.max_buffer_size * 0.8
+         )
+
+     def emit(self, record: logging.LogRecord):
+         """
+         Emit a log record.
+
+         This method is called by the logging framework for each log message.
+         It converts the record to a LogEntry and queues it for sending.
+         """
+         try:
+             # Convert LogRecord to LogEntry
+             entry = self._record_to_entry(record)
+
+             # Try to add to queue (non-blocking)
+             try:
+                 self._queue.put_nowait(entry)
+             except queue.Full:
+                 # Buffer full - drop this log (graceful degradation)
+                 pass
+
+         except Exception:
+             # Never crash due to logging
+             self.handleError(record)
+
+     def _record_to_entry(self, record: logging.LogRecord) -> LogEntry:
+         """Convert a logging.LogRecord to a LogEntry."""
+         # Get timestamp in nanoseconds
+         timestamp = int(record.created * 1_000_000_000)
+
+         # Normalize level name
+         level = record.levelname.lower()
+         if level == "warning":
+             level = "warn"
+
+         # Format message
+         message = self.format(record) if self.formatter else record.getMessage()
+
+         # Extract extra fields
+         extra: Dict[str, str] = {}
+
+         # Check for conversation_id in record
+         if hasattr(record, "conversation_id"):
+             extra["conversation_id"] = str(record.conversation_id)
+
+         # Check for user_id in record
+         if hasattr(record, "user_id"):
+             extra["user_id"] = str(record.user_id)
+
+         # Include logger name
+         extra["logger"] = record.name
+
+         # Include module/function info for debugging
+         if record.funcName and record.funcName != "<module>":
+             extra["function"] = record.funcName
+
+         # Include exception info if present
+         if record.exc_info and record.exc_info[0] is not None:
+             extra["exception_type"] = record.exc_info[0].__name__
+
+         return LogEntry(
+             timestamp=timestamp,
+             level=level,
+             message=message,
+             extra=extra,
+         )
+
+     def flush(self):
+         """Flush all buffered logs immediately."""
+         entries: List[LogEntry] = []
+         while True:
+             try:
+                 entry = self._queue.get_nowait()
+                 entries.append(entry)
+                 self._queue.task_done()
+             except queue.Empty:
+                 break
+
+         if entries:
+             self._flush_entries(entries)
+
+     def close(self):
+         """Close the handler and flush remaining logs."""
+         with self._lock:
+             if not self._running:
+                 return
+
+             self._running = False
+
+         # Wait for flush thread to finish
+         if self._thread and self._thread.is_alive():
+             self._thread.join(timeout=5.0)
+
+         # Final flush
+         self.flush()
+
+         super().close()
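
Editor's note: beyond the constructor example in the class docstring, _record_to_entry also picks up conversation_id and user_id passed through logging's standard extra mechanism. A sketch, with client again standing in for a configured GatewayClient:

    # Sketch: per-record labels flow into LogEntry.extra via logging's extra= dict.
    import logging
    from basion_agent.loki_handler import LokiLogHandler

    handler = LokiLogHandler(gateway_client=client, agent_name="my-agent")
    log = logging.getLogger("my-agent")
    log.addHandler(handler)
    log.setLevel(logging.INFO)

    log.info(
        "tool call finished",
        extra={"conversation_id": "conv-42", "user_id": "user-7"},
    )
    handler.flush()   # force-send anything still buffered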
basion_agent/memory.py ADDED
@@ -0,0 +1,73 @@
+ """Memory context for accessing AI memory from message handler."""
+
+ from typing import List, Optional
+
+ from .memory_client import MemoryClient, MemorySearchResult, UserSummary
+
+
+ class Memory:
+     """User-facing memory context, attached to Message.
+
+     Provides easy access to memory functions scoped to the current user/conversation.
+     """
+
+     def __init__(self, user_id: str, conversation_id: str, client: MemoryClient):
+         self.user_id = user_id
+         self.conversation_id = conversation_id
+         self._client = client
+
+     async def query_about_user(
+         self,
+         query: str,
+         limit: int = 10,
+         threshold: int = 70,
+         context_messages: int = 0,
+     ) -> List[MemorySearchResult]:
+         """Query user's long-term memory with semantic search.
+
+         Args:
+             query: Semantic search query
+             limit: Maximum results (1-100)
+             threshold: Similarity threshold 0-100
+             context_messages: Surrounding messages to include (0-20)
+
+         Returns:
+             List of MemorySearchResult
+         """
+         return await self._client.search(
+             query=query,
+             user_id=self.user_id,
+             limit=limit,
+             min_similarity=threshold,
+             context_messages=context_messages,
+         )
+
+     async def query_about_conversation(
+         self,
+         query: str,
+         limit: int = 10,
+         threshold: int = 70,
+         context_messages: int = 0,
+     ) -> List[MemorySearchResult]:
+         """Query conversation's long-term memory with semantic search.
+
+         Args:
+             query: Semantic search query
+             limit: Maximum results (1-100)
+             threshold: Similarity threshold 0-100
+             context_messages: Surrounding messages to include (0-20)
+
+         Returns:
+             List of MemorySearchResult
+         """
+         return await self._client.search(
+             query=query,
+             conversation_id=self.conversation_id,
+             limit=limit,
+             min_similarity=threshold,
+             context_messages=context_messages,
+         )
+
+     async def get_user_summary(self) -> Optional[UserSummary]:
+         """Get user's summary (aggregated across all conversations)."""
+         return await self._client.get_user_summary(self.user_id)
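
Editor's note: a usage sketch for the Memory context. In a handler it would normally arrive already bound to the current message; constructing it directly requires a MemoryClient, assumed here to be a configured instance named memory_client. The IDs and query text are placeholders.

    # Sketch: semantic memory lookups scoped to the current user/conversation.
    # memory_client is an assumed, configured MemoryClient instance.
    import asyncio
    from basion_agent.memory import Memory

    async def main():
        memory = Memory(user_id="user-7", conversation_id="conv-42", client=memory_client)
        results = await memory.query_about_user("dietary preferences", limit=5, threshold=70)
        for r in results:
            print(r)
        summary = await memory.get_user_summary()   # may be None
        print(summary)

    asyncio.run(main())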