noesium 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. noesium/core/__init__.py +4 -0
  2. noesium/core/agent/__init__.py +14 -0
  3. noesium/core/agent/base.py +227 -0
  4. noesium/core/consts.py +6 -0
  5. noesium/core/goalith/conflict/conflict.py +104 -0
  6. noesium/core/goalith/conflict/detector.py +53 -0
  7. noesium/core/goalith/decomposer/__init__.py +6 -0
  8. noesium/core/goalith/decomposer/base.py +46 -0
  9. noesium/core/goalith/decomposer/callable_decomposer.py +65 -0
  10. noesium/core/goalith/decomposer/llm_decomposer.py +326 -0
  11. noesium/core/goalith/decomposer/prompts.py +140 -0
  12. noesium/core/goalith/decomposer/simple_decomposer.py +61 -0
  13. noesium/core/goalith/errors.py +22 -0
  14. noesium/core/goalith/goalgraph/graph.py +526 -0
  15. noesium/core/goalith/goalgraph/node.py +179 -0
  16. noesium/core/goalith/replanner/base.py +31 -0
  17. noesium/core/goalith/replanner/replanner.py +36 -0
  18. noesium/core/goalith/service.py +26 -0
  19. noesium/core/llm/__init__.py +154 -0
  20. noesium/core/llm/base.py +152 -0
  21. noesium/core/llm/litellm.py +528 -0
  22. noesium/core/llm/llamacpp.py +487 -0
  23. noesium/core/llm/message.py +184 -0
  24. noesium/core/llm/ollama.py +459 -0
  25. noesium/core/llm/openai.py +520 -0
  26. noesium/core/llm/openrouter.py +89 -0
  27. noesium/core/llm/prompt.py +551 -0
  28. noesium/core/memory/__init__.py +11 -0
  29. noesium/core/memory/base.py +464 -0
  30. noesium/core/memory/memu/__init__.py +24 -0
  31. noesium/core/memory/memu/config/__init__.py +26 -0
  32. noesium/core/memory/memu/config/activity/config.py +46 -0
  33. noesium/core/memory/memu/config/event/config.py +46 -0
  34. noesium/core/memory/memu/config/markdown_config.py +241 -0
  35. noesium/core/memory/memu/config/profile/config.py +48 -0
  36. noesium/core/memory/memu/llm_adapter.py +129 -0
  37. noesium/core/memory/memu/memory/__init__.py +31 -0
  38. noesium/core/memory/memu/memory/actions/__init__.py +40 -0
  39. noesium/core/memory/memu/memory/actions/add_activity_memory.py +299 -0
  40. noesium/core/memory/memu/memory/actions/base_action.py +342 -0
  41. noesium/core/memory/memu/memory/actions/cluster_memories.py +262 -0
  42. noesium/core/memory/memu/memory/actions/generate_suggestions.py +198 -0
  43. noesium/core/memory/memu/memory/actions/get_available_categories.py +66 -0
  44. noesium/core/memory/memu/memory/actions/link_related_memories.py +515 -0
  45. noesium/core/memory/memu/memory/actions/run_theory_of_mind.py +254 -0
  46. noesium/core/memory/memu/memory/actions/update_memory_with_suggestions.py +514 -0
  47. noesium/core/memory/memu/memory/embeddings.py +130 -0
  48. noesium/core/memory/memu/memory/file_manager.py +306 -0
  49. noesium/core/memory/memu/memory/memory_agent.py +578 -0
  50. noesium/core/memory/memu/memory/recall_agent.py +376 -0
  51. noesium/core/memory/memu/memory_store.py +628 -0
  52. noesium/core/memory/models.py +149 -0
  53. noesium/core/msgbus/__init__.py +12 -0
  54. noesium/core/msgbus/base.py +395 -0
  55. noesium/core/orchestrix/__init__.py +0 -0
  56. noesium/core/py.typed +0 -0
  57. noesium/core/routing/__init__.py +20 -0
  58. noesium/core/routing/base.py +66 -0
  59. noesium/core/routing/router.py +241 -0
  60. noesium/core/routing/strategies/__init__.py +9 -0
  61. noesium/core/routing/strategies/dynamic_complexity.py +361 -0
  62. noesium/core/routing/strategies/self_assessment.py +147 -0
  63. noesium/core/routing/types.py +38 -0
  64. noesium/core/toolify/__init__.py +39 -0
  65. noesium/core/toolify/base.py +360 -0
  66. noesium/core/toolify/config.py +138 -0
  67. noesium/core/toolify/mcp_integration.py +275 -0
  68. noesium/core/toolify/registry.py +214 -0
  69. noesium/core/toolify/toolkits/__init__.py +1 -0
  70. noesium/core/tracing/__init__.py +37 -0
  71. noesium/core/tracing/langgraph_hooks.py +308 -0
  72. noesium/core/tracing/opik_tracing.py +144 -0
  73. noesium/core/tracing/token_tracker.py +166 -0
  74. noesium/core/utils/__init__.py +10 -0
  75. noesium/core/utils/logging.py +172 -0
  76. noesium/core/utils/statistics.py +12 -0
  77. noesium/core/utils/typing.py +17 -0
  78. noesium/core/vector_store/__init__.py +79 -0
  79. noesium/core/vector_store/base.py +94 -0
  80. noesium/core/vector_store/pgvector.py +304 -0
  81. noesium/core/vector_store/weaviate.py +383 -0
  82. noesium-0.1.0.dist-info/METADATA +525 -0
  83. noesium-0.1.0.dist-info/RECORD +86 -0
  84. noesium-0.1.0.dist-info/WHEEL +5 -0
  85. noesium-0.1.0.dist-info/licenses/LICENSE +21 -0
  86. noesium-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,149 @@
1
+ """
2
+ Pydantic models for memory system data structures.
3
+
4
+ This module defines the core data models used by the simple memory system,
5
+ providing type safety and validation using Pydantic.
6
+ """
7
+
8
from datetime import datetime, timezone
from typing import Any, Dict, List, Literal, Optional
from uuid import uuid4

from pydantic import BaseModel, ConfigDict, Field
13
+
14
+
15
class BaseMemoryItem(BaseModel):
    """
    Base class for all memory items with common fields and functionality.

    This class provides the foundation for all memory items, ensuring
    consistent metadata and identification across different memory types.

    All timestamps are timezone-aware UTC (``datetime.utcnow()`` is
    deprecated and produced naive datetimes). Every mutator on this class
    (``add_tag``, ``remove_tag``, ``update_metadata``) records the
    modification time in ``updated_at``.
    """

    model_config = ConfigDict(from_attributes=True, validate_default=True, arbitrary_types_allowed=True, extra="allow")

    # Stable identity; uuid4 avoids collisions across independent stores.
    id: str = Field(default_factory=lambda: str(uuid4()), description="Unique identifier for the memory item")

    # Timezone-aware creation timestamp (UTC).
    created_at: datetime = Field(
        default_factory=lambda: datetime.now(timezone.utc),
        description="Timestamp when the memory was created",
    )

    updated_at: Optional[datetime] = Field(default=None, description="Timestamp when the memory was last updated")

    version: int = Field(default=1, description="Version number for tracking memory updates", ge=1)

    metadata: Dict[str, Any] = Field(default_factory=dict, description="Additional metadata for the memory item")

    tags: List[str] = Field(default_factory=list, description="Tags for categorizing and filtering memory items")

    def add_tag(self, tag: str) -> None:
        """Add a tag to the memory item if it doesn't already exist."""
        if tag not in self.tags:
            self.tags.append(tag)
            # Keep modification tracking consistent with update_metadata().
            self.updated_at = datetime.now(timezone.utc)

    def remove_tag(self, tag: str) -> None:
        """Remove a tag from the memory item (no-op if absent)."""
        if tag in self.tags:
            self.tags.remove(tag)
            self.updated_at = datetime.now(timezone.utc)

    def update_metadata(self, key: str, value: Any) -> None:
        """Update a metadata field and record the modification time."""
        self.metadata[key] = value
        self.updated_at = datetime.now(timezone.utc)
51
+
52
+
53
class MemoryItem(BaseMemoryItem):
    """
    Standard memory item for storing general information.

    This is the primary memory item type for storing conversations,
    facts, or any textual information with context. Inherits id,
    timestamps, versioning, metadata, and tags from BaseMemoryItem.
    """

    # The actual stored text; the only required field on this model.
    content: str = Field(description="The main content/text of the memory item")

    # Closed set of categories enforced by the Literal type.
    memory_type: Literal["message", "fact", "note"] = Field(
        default="message", description="Type of memory item for categorization"
    )

    # Ownership/scoping identifiers; all optional so items can be global.
    user_id: Optional[str] = Field(default=None, description="ID of the user associated with this memory")

    agent_id: Optional[str] = Field(default=None, description="ID of the agent associated with this memory")

    session_id: Optional[str] = Field(default=None, description="ID of the session/conversation this memory belongs to")

    # Bounded to [0.0, 1.0] by the ge/le validators; defaults to neutral 0.5.
    importance: float = Field(
        default=0.5, description="Importance score of the memory item (0.0 to 1.0)", ge=0.0, le=1.0
    )

    # NOTE: annotated Optional but defaults to {} via default_factory, so it
    # is only None when a caller passes None explicitly.
    context: Optional[Dict[str, Any]] = Field(
        default_factory=dict, description="Additional context information for the memory"
    )
80
+
81
+
82
class MemoryFilter(BaseModel):
    """
    Filter model for querying memory items.

    This model provides a structured way to filter memory items
    based on various criteria. Every field is optional; fields left
    at their defaults are presumably treated as "no constraint" by
    the query layer (consumers live elsewhere — confirm against the
    memory store implementation).
    """

    # Scope filters: restrict results to a given user/agent/session.
    user_id: Optional[str] = Field(default=None, description="Filter by user ID")

    agent_id: Optional[str] = Field(default=None, description="Filter by agent ID")

    session_id: Optional[str] = Field(default=None, description="Filter by session/conversation ID")

    # Plain str (not Literal) so filters can name types beyond MemoryItem's set.
    memory_type: Optional[str] = Field(default=None, description="Filter by memory type")

    # AND semantics: items must carry every listed tag (per description).
    tags: Optional[List[str]] = Field(default=None, description="Filter by tags (items must have all specified tags)")

    # Creation-time window bounds.
    date_from: Optional[datetime] = Field(default=None, description="Filter items created after this date")

    date_to: Optional[datetime] = Field(default=None, description="Filter items created before this date")

    # Same [0.0, 1.0] bounds as MemoryItem.importance.
    min_importance: Optional[float] = Field(
        default=None, description="Filter items with importance above this threshold", ge=0.0, le=1.0
    )

    # Exact key/value matches against MemoryItem.metadata.
    metadata_filters: Dict[str, Any] = Field(default_factory=dict, description="Filter by metadata key-value pairs")
109
+
110
+
111
class SearchResult(BaseModel):
    """
    Model for search results with relevance scoring.

    This model wraps memory items with relevance scores
    for search and retrieval operations.
    """

    # The item that matched the search query.
    memory_item: MemoryItem = Field(description="The retrieved memory item")

    # Normalized to [0.0, 1.0] by the ge/le validators.
    relevance_score: float = Field(description="Relevance score for the search query", ge=0.0, le=1.0)

    # Raw distance from the vector backend, when one was used; unbounded
    # and backend-specific (cosine vs. euclidean — confirm per vector store).
    distance: Optional[float] = Field(default=None, description="Distance metric from vector search (if applicable)")

    # Free-form extras about how the result was produced.
    search_metadata: Dict[str, Any] = Field(
        default_factory=dict, description="Additional metadata about the search result"
    )
128
+
129
+
130
class MemoryStats(BaseModel):
    """
    Statistics model for memory system analytics.

    This model provides insights into memory usage and performance.
    Aggregates are snapshots computed by the memory store; this model
    only carries them.
    """

    # Total count across all types/users; the only required field.
    total_items: int = Field(description="Total number of memory items")

    # Breakdown maps: memory_type -> count and user_id -> count.
    items_by_type: Dict[str, int] = Field(default_factory=dict, description="Count of items by memory type")

    items_by_user: Dict[str, int] = Field(default_factory=dict, description="Count of items by user ID")

    # None when the store is empty.
    oldest_item_date: Optional[datetime] = Field(default=None, description="Date of the oldest memory item")

    newest_item_date: Optional[datetime] = Field(default=None, description="Date of the newest memory item")

    # Mean of MemoryItem.importance; 0.0 when there are no items.
    average_importance: float = Field(default=0.0, description="Average importance score across all items")

    # Approximate — backends may not report exact sizes.
    storage_size_bytes: Optional[int] = Field(default=None, description="Approximate storage size in bytes")
@@ -0,0 +1,12 @@
1
+ """Generic event-watchdog framework based on bubus library."""
2
+
3
+ from bubus import BaseEvent, EventBus
4
+
5
+ from .base import BaseWatchdog, EventProcessor
6
+
7
+ __all__ = [
8
+ "BaseEvent",
9
+ "BaseWatchdog",
10
+ "EventBus",
11
+ "EventProcessor",
12
+ ]
@@ -0,0 +1,395 @@
1
+ """Base watchdog class for general event monitoring components.
2
+
3
+ This is a generic watchdog class that can be used with any event processor that
4
+ implements the EventProcessor protocol. Derived from browser-use BaseWatchdog.
5
+ """
6
+
7
+ import time
8
+ from collections.abc import Iterable
9
+ from typing import Any, ClassVar, Generic, Protocol, TypeVar
10
+
11
+ from bubus import BaseEvent, EventBus
12
+ from pydantic import BaseModel, ConfigDict, Field, model_validator
13
+
14
+ from noesium.core.utils.logging import color_text
15
+
16
+ # Generic type for the event processor
17
+ TEventProcessor = TypeVar("TEventProcessor", bound="EventProcessor")
18
+
19
+
20
class EventProcessor(Protocol):
    """Protocol defining the interface for event processors.

    Structural typing: any object exposing an ``event_bus`` and a
    ``logger`` attribute satisfies this protocol — no subclassing
    required. BaseWatchdog validates both attributes at construction.
    """

    @property
    def event_bus(self) -> EventBus:
        """Get the event bus instance."""
        ...

    @property
    def logger(self):
        """Get the logger instance."""
        ...
32
+
33
+
34
class BaseWatchdog(BaseModel, Generic[TEventProcessor]):
    """Base class for all event watchdogs.

    Watchdogs monitor events and emit new events based on changes.
    They automatically register event handlers based on method names.

    Handler methods should be named: on_EventTypeName(self, event: EventTypeName)

    Generic type TEventProcessor allows you to specify the type of event processor
    this watchdog works with (e.g., DatabaseSession, etc.)
    """

    model_config = ConfigDict(
        arbitrary_types_allowed=True,  # allow non-serializable objects like EventBus/EventProcessor in fields
        extra="forbid",  # don't allow implicit class/instance state, everything must be a properly typed Field or PrivateAttr
        validate_assignment=False,  # avoid re-triggering __init__ / validators on values on every assignment
        revalidate_instances="never",  # avoid re-triggering __init__ / validators and erasing private attrs
    )

    # Core dependencies
    event_bus: EventBus = Field()
    event_processor: Any = Field()  # Use Any to avoid Pydantic validation issues with generic types

    @model_validator(mode="after")
    def validate_event_bus_consistency(self) -> "BaseWatchdog":
        """Validate that event_processor has the same event_bus instance as the watchdog.

        This prevents the architectural issue where events are dispatched to one bus
        but handlers listen on a different bus, causing infinite hangs.

        Raises:
            ValueError: if the processor lacks ``event_bus``/``logger`` attributes,
                or if its bus is a different instance than this watchdog's bus.
        """
        if not hasattr(self.event_processor, "event_bus"):
            raise ValueError(
                f"EventProcessor {type(self.event_processor).__name__} must have an 'event_bus' attribute. "
                f"Ensure your event processor implements the EventProcessor protocol correctly."
            )

        if not hasattr(self.event_processor, "logger"):
            raise ValueError(
                f"EventProcessor {type(self.event_processor).__name__} must have an 'logger' attribute. "
                f"Ensure your event processor implements the EventProcessor protocol correctly."
            )

        processor_bus = self.event_processor.event_bus
        watchdog_bus = self.event_bus

        # Identity check on purpose: two equal-but-distinct buses would still hang.
        if processor_bus is not watchdog_bus:
            raise ValueError(
                f"EventProcessor.event_bus and BaseWatchdog.event_bus must be the same instance! "
                f"Found different instances: {type(processor_bus)} vs {type(watchdog_bus)}. "
                f"This causes events to be dispatched to one bus while handlers listen on another, "
                f"resulting in infinite hangs. Ensure both use the same EventBus instance."
            )

        return self

    # Class variables to statically define the list of events relevant to each watchdog
    # (not enforced, just to make it easier to understand the code and debug watchdogs at runtime)
    LISTENS_TO: ClassVar[list[type[BaseEvent[Any]]]] = []  # Events this watchdog listens to
    EMITS: ClassVar[list[type[BaseEvent[Any]]]] = []  # Events this watchdog emits

    # Shared state that other watchdogs might need to access should be defined on EventProcessor, not here!
    # Shared helper methods needed by other watchdogs should be defined on EventProcessor, not here!
    # Alternatively, expose some events on the watchdog to allow access to state/helpers via event_bus system.

    # Private state internal to the watchdog can be defined like this on BaseWatchdog subclasses:
    # _cache: dict[str, bytes] = PrivateAttr(default_factory=dict)
    # _watcher_task: asyncio.Task | None = PrivateAttr(default=None)
    # _download_tasks: WeakSet[asyncio.Task] = PrivateAttr(default_factory=WeakSet)
    # ...

    @property
    def logger(self):
        """Get the logger from the event processor."""
        return self.event_processor.logger

    @staticmethod
    def attach_handler_to_processor(
        event_processor: EventProcessor, event_class: type[BaseEvent[Any]], handler
    ) -> None:
        """Attach a single event handler to an event processor.

        Wraps the handler in a uniquely-named async wrapper that adds debug
        logging (parent/grandparent event lineage, timing) and error routing
        via ``_handle_handler_error`` before registering it on the bus.

        Args:
            event_processor: The event processor to attach to
            event_class: The event class to listen for
            handler: The handler method (must start with 'on_' and end with event type)

        Raises:
            RuntimeError: if an identically-named wrapper is already registered
                (i.e. attach_to_processor() was called twice).
        """
        event_bus = event_processor.event_bus

        # Validate handler naming convention
        assert hasattr(handler, "__name__"), "Handler must have a __name__ attribute"
        assert handler.__name__.startswith("on_"), f'Handler {handler.__name__} must start with "on_"'
        assert handler.__name__.endswith(
            event_class.__name__
        ), f"Handler {handler.__name__} must end with event type {event_class.__name__}"

        # Get the watchdog instance if this is a bound method
        # NOTE(review): if the handler is unbound, watchdog_instance is None and the
        # _handle_handler_error call below would raise AttributeError — confirm all
        # callers pass bound methods.
        watchdog_instance = getattr(handler, "__self__", None)
        watchdog_class_name = watchdog_instance.__class__.__name__ if watchdog_instance else "Unknown"

        # Create a wrapper function with unique name to avoid duplicate handler warnings
        # Capture handler by value to avoid closure issues
        def make_unique_handler(actual_handler):
            async def unique_handler(event):
                # Safe event history access - avoid hanging during registration
                try:
                    parent_event = event_bus.event_history.get(event.event_parent_id) if event.event_parent_id else None
                    grandparent_event = (
                        event_bus.event_history.get(parent_event.event_parent_id)
                        if parent_event and parent_event.event_parent_id
                        else None
                    )
                    if parent_event:
                        parent_info = f"{color_text('↲ triggered by', 'yellow')} {color_text(f'on_{parent_event.event_type}#{parent_event.event_id[-4:]}', 'cyan')}"
                        return_info = f"⤴ {color_text('returned to', 'green')} {color_text(f'on_{parent_event.event_type}#{parent_event.event_id[-4:]}', 'cyan')}"
                    else:
                        parent_info = color_text("👈 by EventProcessor", "magenta")
                        return_info = (
                            f"👉 {color_text('returned to', 'green')} {color_text('EventProcessor', 'magenta')}"
                        )

                    grandparent_info = ""
                    if parent_event and grandparent_event:
                        grandparent_info = f" {color_text('↲ under', 'yellow')} {color_text(f'{grandparent_event.event_type}#{grandparent_event.event_id[-4:]}', 'cyan')}"
                except Exception:
                    # Fallback logging if event history access fails
                    parent_info = color_text("👈 by EventProcessor", "magenta")
                    return_info = f"👉 {color_text('returned to', 'green')} {color_text('EventProcessor', 'magenta')}"
                    grandparent_info = ""

                event_str = f"#{event.event_id[-4:]}" if hasattr(event, "event_id") and event.event_id else ""
                time_start = time.time()
                watchdog_and_handler_str = f"[{watchdog_class_name}.{actual_handler.__name__}({event_str})]".ljust(54)
                event_processor.logger.debug(
                    f"{color_text('🚌', 'cyan')} {watchdog_and_handler_str} ⏳ Starting... {parent_info}{grandparent_info}"
                )

                try:
                    # **EXECUTE THE EVENT HANDLER FUNCTION**
                    result = await actual_handler(event)

                    # A returned Exception object is treated as a failure, not a value.
                    if isinstance(result, Exception):
                        raise result

                    # just for debug logging, not used for anything else
                    time_end = time.time()
                    time_elapsed = time_end - time_start
                    result_summary = (
                        "" if result is None else f" ➡️ {color_text(f'<{type(result).__name__}>', 'magenta')}"
                    )
                    event_processor.logger.debug(
                        f"{color_text('🚌', 'green')} {watchdog_and_handler_str} ✅ Succeeded ({time_elapsed:.2f}s){result_summary} {return_info}"
                    )
                    return result
                except Exception as e:
                    time_end = time.time()
                    time_elapsed = time_end - time_start
                    event_processor.logger.error(
                        f"{color_text('🚌', 'red')} {watchdog_and_handler_str} ❌ Failed ({time_elapsed:.2f}s): {type(e).__name__}: {e}"
                    )

                    # Attempt to handle errors - subclasses can override this method
                    try:
                        await watchdog_instance._handle_handler_error(e, event, actual_handler)
                    except Exception as sub_error:
                        event_processor.logger.error(
                            f"{color_text('🚌', 'red')} {watchdog_and_handler_str} ❌ Error handling failed: {type(sub_error).__name__}: {sub_error}"
                        )
                        # Re-raises sub_error (the error-handler's failure).
                        raise

                    # Error handling succeeded: still re-raise the original exception.
                    raise

            return unique_handler

        unique_handler = make_unique_handler(handler)
        unique_handler.__name__ = f"{watchdog_class_name}.{handler.__name__}"

        # Check if this handler is already registered - throw error if duplicate
        existing_handlers = event_bus.handlers.get(event_class.__name__, [])
        handler_names = [getattr(h, "__name__", str(h)) for h in existing_handlers]

        if unique_handler.__name__ in handler_names:
            raise RuntimeError(
                f"[{watchdog_class_name}] Duplicate handler registration attempted! "
                f"Handler {unique_handler.__name__} is already registered for {event_class.__name__}. "
                f"This likely means attach_to_processor() was called multiple times."
            )

        event_bus.on(event_class, unique_handler)

    async def _handle_handler_error(self, error: Exception, event: BaseEvent[Any], handler) -> None:
        """Handle errors that occur in event handlers.

        Subclasses can override this method to implement custom error handling logic.
        Default implementation does nothing.

        Args:
            error: The exception that occurred
            event: The event that was being processed
            handler: The handler method that failed
        """

    def attach_to_processor(self) -> None:
        """Attach watchdog to its event processor and start monitoring.

        This method handles event listener registration. The watchdog is already
        bound to an event processor via self.event_processor from initialization.
        Handlers are discovered by name (on_EventName); event classes come from
        LISTENS_TO when declared, otherwise from best-effort bus introspection.
        """
        # Register event handlers automatically based on method names
        assert self.event_processor is not None, "Event processor not initialized"

        # Create efficient event class lookup: event name -> event class
        event_class_map = {}

        # Primary strategy: Use LISTENS_TO for efficient event class discovery
        if self.LISTENS_TO:
            event_class_map = {event_class.__name__: event_class for event_class in self.LISTENS_TO}
            self.logger.debug(
                f"[{self.__class__.__name__}] Using LISTENS_TO for event discovery: {list(event_class_map.keys())}"
            )
        else:
            # Safe fallback strategy: Try to discover event classes from event bus event registry
            # This is more reliable than trying to extract from handler annotations during registration
            try:
                # Check if the event bus has an event registry or similar mechanism
                if hasattr(self.event_bus, "_event_types"):
                    event_class_map = {cls.__name__: cls for cls in self.event_bus._event_types}
                elif hasattr(self.event_bus, "event_registry"):
                    event_class_map = {name: cls for name, cls in self.event_bus.event_registry.items()}
                else:
                    # Last resort: try to extract from existing handlers (but do it safely)
                    for event_name, handlers in self.event_bus.handlers.items():
                        if handlers and hasattr(handlers[0], "__annotations__"):
                            # Get the event class from handler's first parameter annotation
                            annotations = handlers[0].__annotations__
                            if "event" in annotations:
                                event_class = annotations["event"]
                                if isinstance(event_class, type) and issubclass(event_class, BaseEvent):
                                    event_class_map[event_name] = event_class

                if event_class_map:
                    self.logger.debug(
                        f"[{self.__class__.__name__}] Discovered event classes: {list(event_class_map.keys())}"
                    )
                else:
                    self.logger.warning(
                        f"[{self.__class__.__name__}] No event classes discovered. Define LISTENS_TO for better performance."
                    )
            except Exception as e:
                self.logger.warning(f"[{self.__class__.__name__}] Failed to discover event classes: {e}")

        # Find all handler methods (on_EventName) and register them efficiently
        registered_events = set()
        handler_methods = []

        # Collect handler methods first
        for method_name in dir(self):
            if method_name.startswith("on_") and callable(getattr(self, method_name)):
                handler_methods.append(method_name)

        # Process each handler method
        for method_name in handler_methods:
            # Extract event name from method name (on_EventName -> EventName)
            event_name = method_name[3:]  # Remove 'on_' prefix

            # Look up event class efficiently
            event_class = event_class_map.get(event_name)

            if event_class:
                # ASSERTION: If LISTENS_TO is defined, enforce it
                if self.LISTENS_TO:
                    assert event_class in self.LISTENS_TO, (
                        f"[{self.__class__.__name__}] Handler {method_name} listens to {event_name} "
                        f"but {event_name} is not declared in LISTENS_TO: {[e.__name__ for e in self.LISTENS_TO]}"
                    )

                handler = getattr(self, method_name)

                # Use the static helper to attach the handler
                self.attach_handler_to_processor(self.event_processor, event_class, handler)
                registered_events.add(event_class)

                self.logger.debug(f"[{self.__class__.__name__}] Registered handler {method_name} for {event_name}")
            else:
                # Better error message for missing event classes
                if self.LISTENS_TO:
                    available_events = [e.__name__ for e in self.LISTENS_TO]
                    self.logger.warning(
                        f"[{self.__class__.__name__}] Handler {method_name} references unknown event '{event_name}'. "
                        f"Available events in LISTENS_TO: {available_events}"
                    )
                else:
                    self.logger.warning(
                        f"[{self.__class__.__name__}] Handler {method_name} references unknown event '{event_name}'. "
                        f"Consider defining LISTENS_TO class variable for better event discovery."
                    )

        # ASSERTION: If LISTENS_TO is defined, ensure all declared events have handlers
        if self.LISTENS_TO:
            missing_handlers = set(self.LISTENS_TO) - registered_events
            if missing_handlers:
                missing_names = [e.__name__ for e in missing_handlers]
                missing_method_names = [f"on_{name}" for name in missing_names]
                self.logger.warning(
                    f"[{self.__class__.__name__}] LISTENS_TO declares {missing_names} "
                    f'but no handlers found (missing {", ".join(missing_method_names)} methods)'
                )

        self.logger.info(f"[{self.__class__.__name__}] Successfully registered {len(registered_events)} event handlers")

    def emit_event(self, event: BaseEvent[Any]) -> None:
        """Emit an event to the event bus.

        When EMITS is declared, asserts the event type is listed there
        before dispatching.

        Args:
            event: The event to emit
        """
        if self.EMITS:
            event_type = type(event)
            assert event_type in self.EMITS, (
                f"[{self.__class__.__name__}] Attempting to emit {event_type.__name__} "
                f"but it is not declared in EMITS: {[e.__name__ for e in self.EMITS]}"
            )

        self.event_bus.dispatch(event)

    def __del__(self) -> None:
        """Clean up any running tasks during garbage collection."""

        # A BIT OF MAGIC: Cancel any private attributes that look like asyncio tasks
        try:
            for attr_name in dir(self):
                # e.g. _watcher_task = asyncio.Task
                if attr_name.startswith("_") and attr_name.endswith("_task"):
                    try:
                        task = getattr(self, attr_name)
                        if hasattr(task, "cancel") and callable(task.cancel) and not task.done():
                            task.cancel()
                        # self.logger.debug(f'[{self.__class__.__name__}] Cancelled {attr_name} during cleanup')
                    except Exception:
                        pass  # Ignore errors during cleanup

                # e.g. _download_tasks = WeakSet[asyncio.Task] or list[asyncio.Task]
                if (
                    attr_name.startswith("_")
                    and attr_name.endswith("_tasks")
                    and isinstance(getattr(self, attr_name), Iterable)
                ):
                    for task in getattr(self, attr_name):
                        try:
                            if hasattr(task, "cancel") and callable(task.cancel) and not task.done():
                                task.cancel()
                            # self.logger.debug(f'[{self.__class__.__name__}] Cancelled {attr_name} during cleanup')
                        except Exception:
                            pass  # Ignore errors during cleanup
        except Exception as e:
            # Use a basic logger if available, otherwise ignore
            try:
                if hasattr(self, "logger"):
                    self.logger.error(
                        f"⚠️ Error during {self.__class__.__name__} garbage collection __del__(): {type(e)}: {e}"
                    )
            except Exception:
                pass  # Ignore errors during cleanup
File without changes
noesium/core/py.typed ADDED
File without changes
@@ -0,0 +1,20 @@
1
+ """Model routing module for determining appropriate LLM tiers based on query complexity."""
2
+
3
+ from .base import BaseRoutingStrategy
4
+ from .router import ModelRouter
5
+ from .strategies import DynamicComplexityStrategy, SelfAssessmentStrategy
6
+ from .types import ComplexityScore, ModelTier, RoutingResult
7
+
8
+ __all__ = [
9
+ # Main router class
10
+ "ModelRouter",
11
+ # Base classes for extensibility
12
+ "BaseRoutingStrategy",
13
+ # Types and enums
14
+ "ModelTier",
15
+ "ComplexityScore",
16
+ "RoutingResult",
17
+ # Built-in strategies
18
+ "SelfAssessmentStrategy",
19
+ "DynamicComplexityStrategy",
20
+ ]
@@ -0,0 +1,66 @@
1
+ """Base classes for routing strategies."""
2
+
3
+ from abc import ABC, abstractmethod
4
+ from typing import Any, Dict, Optional
5
+
6
+ from noesium.core.llm.base import BaseLLMClient
7
+
8
+ from .types import ComplexityScore, ModelTier, RoutingResult
9
+
10
+
11
class BaseRoutingStrategy(ABC):
    """Abstract base class for routing strategies.

    A strategy inspects a query and recommends a model tier. Subclasses
    implement ``route`` and ``get_strategy_name``; ``_create_result`` is a
    shared convenience for assembling the result object.
    """

    def __init__(self, lite_client: Optional[BaseLLMClient] = None, config: Optional[Dict[str, Any]] = None):
        """
        Initialize the routing strategy.

        Args:
            lite_client: Optional LLM client for lite model operations
            config: Strategy-specific configuration parameters
        """
        self.lite_client = lite_client
        # Defensive copy so later mutations never leak back to the caller's dict.
        self.config = dict(config) if config else {}

    @abstractmethod
    def route(self, query: str) -> RoutingResult:
        """
        Route a query to the appropriate model tier.

        Args:
            query: The input query to route

        Returns:
            RoutingResult with tier recommendation and analysis
        """

    @abstractmethod
    def get_strategy_name(self) -> str:
        """Return the name of this routing strategy."""

    def _create_result(
        self,
        tier: ModelTier,
        confidence: float,
        complexity_score: ComplexityScore,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> RoutingResult:
        """
        Helper method to create a RoutingResult.

        Args:
            tier: Recommended model tier
            confidence: Confidence in the decision
            complexity_score: Complexity analysis
            metadata: Additional metadata

        Returns:
            RoutingResult instance
        """
        # Stamp the result with the concrete strategy's own name.
        strategy_name = self.get_strategy_name()
        return RoutingResult(
            tier=tier,
            confidence=confidence,
            complexity_score=complexity_score,
            strategy=strategy_name,
            metadata=metadata,
        )