mojentic 0.6.1__py3-none-any.whl → 0.7.1__py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- _examples/async_dispatcher_example.py +241 -0
- _examples/async_llm_example.py +236 -0
- mojentic/agents/async_aggregator_agent.py +162 -0
- mojentic/agents/async_aggregator_agent_spec.py +227 -0
- mojentic/agents/async_llm_agent.py +197 -0
- mojentic/agents/async_llm_agent_spec.py +166 -0
- mojentic/agents/base_async_agent.py +27 -0
- mojentic/async_dispatcher.py +134 -0
- mojentic/async_dispatcher_spec.py +244 -0
- mojentic/llm/gateways/models.py +3 -3
- mojentic/llm/gateways/ollama.py +4 -4
- mojentic/llm/gateways/openai.py +5 -5
- mojentic/llm/gateways/openai_messages_adapter.py +8 -4
- mojentic/llm/llm_broker.py +4 -4
- {mojentic-0.6.1.dist-info → mojentic-0.7.1.dist-info}/METADATA +2 -1
- {mojentic-0.6.1.dist-info → mojentic-0.7.1.dist-info}/RECORD +19 -10
- {mojentic-0.6.1.dist-info → mojentic-0.7.1.dist-info}/WHEEL +0 -0
- {mojentic-0.6.1.dist-info → mojentic-0.7.1.dist-info}/licenses/LICENSE.md +0 -0
- {mojentic-0.6.1.dist-info → mojentic-0.7.1.dist-info}/top_level.txt +0 -0
_examples/async_dispatcher_example.py (new file)
@@ -0,0 +1,241 @@

"""
Example script demonstrating how to use the AsyncDispatcher, BaseAsyncAgent, and AsyncAggregatorAgent.

This script shows how to create and use asynchronous agents with the AsyncDispatcher.
"""

import asyncio
from pathlib import Path
from typing import List

from pydantic import BaseModel, Field

from mojentic.agents.async_aggregator_agent import AsyncAggregatorAgent
from mojentic.agents.async_llm_agent import BaseAsyncLLMAgent
from mojentic.agents.base_async_agent import BaseAsyncAgent
from mojentic.async_dispatcher import AsyncDispatcher
from mojentic.event import Event, TerminateEvent
from mojentic.llm import LLMBroker
from mojentic.llm.gateways.models import LLMMessage, MessageRole
from mojentic.router import Router


# Define some example events
class TextEvent(Event):
    text: str = Field(..., description="The text content of the event")


class AnalysisEvent(Event):
    analysis: str = Field(..., description="The analysis of the text")


class SummaryEvent(Event):
    summary: str = Field(..., description="The summary of the text")


class CombinedResultEvent(Event):
    text: str = Field(..., description="The original text")
    analysis: str = Field(..., description="The analysis of the text")
    summary: str = Field(..., description="The summary of the text")
    combined: str = Field(..., description="The combined result")


# Define response models for LLM agents
class AnalysisResponse(BaseModel):
    analysis: str = Field(..., description="The analysis of the text")


class SummaryResponse(BaseModel):
    summary: str = Field(..., description="The summary of the text")


class CombinedResponse(BaseModel):
    combined: str = Field(..., description="The combined result of the analysis and summary")


# Define some example agents
class TextAnalyzerAgent(BaseAsyncLLMAgent):
    """
    An agent that analyzes text and produces an analysis.
    """

    def __init__(self, llm: LLMBroker):
        super().__init__(
            llm=llm,
            behaviour="You are a text analysis assistant. Your job is to provide a detailed analysis of the given text, including key themes, structure, and notable elements.",
            response_model=AnalysisResponse
        )

    async def receive_event_async(self, event: Event) -> List[Event]:
        if isinstance(event, TextEvent):
            prompt = f"""
            Please analyze the following text in detail. Consider:
            - Main themes and topics
            - Structure and organization
            - Key points and arguments
            - Style and tone
            - Intended audience
            - Any notable or unique elements

            Text to analyze:
            {event.text[:1000]}... (text truncated for brevity)
            """
            response = await self.generate_response(prompt)
            return [AnalysisEvent(source=type(self), correlation_id=event.correlation_id, analysis=response.analysis)]
        return []


class TextSummarizerAgent(BaseAsyncLLMAgent):
    """
    An agent that summarizes text.
    """

    def __init__(self, llm: LLMBroker):
        super().__init__(
            llm=llm,
            behaviour="You are a text summarization assistant. Your job is to provide concise, accurate summaries of texts while preserving the key information and main points.",
            response_model=SummaryResponse
        )

    async def receive_event_async(self, event: Event) -> List[Event]:
        if isinstance(event, TextEvent):
            prompt = f"""
            Please provide a concise summary of the following text. The summary should:
            - Capture the main points and key information
            - Be significantly shorter than the original text
            - Maintain the original meaning and intent
            - Be clear and coherent
            - Exclude unnecessary details

            Text to summarize:
            {event.text[:1000]}... (text truncated for brevity)
            """
            response = await self.generate_response(prompt)
            return [SummaryEvent(source=type(self), correlation_id=event.correlation_id, summary=response.summary)]
        return []


class ResultCombinerAgent(AsyncAggregatorAgent):
    """
    An agent that combines the analysis and summary of a text using LLM.
    """

    def __init__(self, llm: LLMBroker):
        super().__init__(event_types_needed=[TextEvent, AnalysisEvent, SummaryEvent])
        self.llm = llm
        self.behaviour = "You are an assistant that combines text analysis and summaries into a comprehensive report."
        self.response_model = CombinedResponse

    async def process_events(self, events):
        # Extract the events
        text_event = next((e for e in events if isinstance(e, TextEvent)), None)
        analysis_event = next((e for e in events if isinstance(e, AnalysisEvent)), None)
        summary_event = next((e for e in events if isinstance(e, SummaryEvent)), None)

        if text_event and analysis_event and summary_event:
            # Use LLM to create a sophisticated combination
            prompt = f"""
            I have analyzed and summarized a text. Please combine these into a comprehensive report.

            Original Text (excerpt): {text_event.text[:300]}... (text truncated for brevity)

            Analysis: {analysis_event.analysis}

            Summary: {summary_event.summary}

            Please create a well-structured, insightful report that integrates the analysis and summary,
            highlighting the most important aspects of the text. The report should provide a comprehensive
            understanding of the text's content, structure, and significance.
            """
            # Create a temporary LLM agent to generate the response
            messages = [
                LLMMessage(role=MessageRole.System, content=self.behaviour),
                LLMMessage(role=MessageRole.User, content=prompt)
            ]

            # Generate the response
            import asyncio
            response_json = await asyncio.to_thread(
                self.llm.generate_object,
                messages=messages,
                object_model=self.response_model
            )

            combined = response_json.combined

            return [CombinedResultEvent(
                source=type(self),
                correlation_id=text_event.correlation_id,
                text=text_event.text,
                analysis=analysis_event.analysis,
                summary=summary_event.summary,
                combined=combined
            )]
        return []


class ResultOutputAgent(BaseAsyncAgent):
    """
    An agent that receives the CombinedResultEvent, outputs the result to the user,
    and emits a TerminateEvent to exit the event loop.
    """

    async def receive_event_async(self, event: Event) -> List[Event]:
        if isinstance(event, CombinedResultEvent):
            # Output the result to the user
            print("\n=== FINAL ANSWER ===")
            print(event.combined)
            print("===================\n")

            # Emit a TerminateEvent to exit the event loop
            return [TerminateEvent(source=type(self), correlation_id=event.correlation_id)]
        return []


async def main():
    """
    Main function that demonstrates the usage of AsyncDispatcher and async agents.
    """
    # Initialize the LLM broker with the same model as in async_llm_example
    llm = LLMBroker(model="qwen3:30b-a3b-q4_K_M")

    # Create a router and register agents
    router = Router()

    # Create agents with LLM
    analyzer = TextAnalyzerAgent(llm)
    summarizer = TextSummarizerAgent(llm)
    combiner = ResultCombinerAgent(llm)
    output_agent = ResultOutputAgent()

    # Register agents with the router
    router.add_route(TextEvent, analyzer)
    router.add_route(TextEvent, summarizer)
    router.add_route(TextEvent, combiner)
    router.add_route(AnalysisEvent, combiner)
    router.add_route(SummaryEvent, combiner)
    router.add_route(CombinedResultEvent, output_agent)

    # Create and start the dispatcher
    dispatcher = await AsyncDispatcher(router).start()

    # Create a text event
    with open(Path.cwd().parent.parent / "README.md", "r") as f:
        text = f.read()
    event = TextEvent(source=type("ExampleSource", (), {}), text=text)

    # Dispatch the event
    dispatcher.dispatch(event)

    # Wait for all events in the queue to be processed
    print("Waiting for all events to be processed...")
    queue_empty = await dispatcher.wait_for_empty_queue(timeout=10)
    if not queue_empty:
        print("Warning: Not all events were processed within the timeout period.")

    # Stop the dispatcher
    await dispatcher.stop()


if __name__ == "__main__":
    asyncio.run(main())
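For orientation before the longer examples that follow, the sketch below boils the wiring above down to a single agent and a single event type: register routes, start the dispatcher, dispatch, wait for the queue to drain, stop. It is a minimal sketch distilled from the example above; `PingEvent` and `EchoAgent` are illustrative names only, not part of the package, and it assumes the same Router/AsyncDispatcher/BaseAsyncAgent API used there.

```python
import asyncio
from typing import List

from pydantic import Field

from mojentic.agents.base_async_agent import BaseAsyncAgent
from mojentic.async_dispatcher import AsyncDispatcher
from mojentic.event import Event
from mojentic.router import Router


class PingEvent(Event):  # illustrative event type, not part of the package
    message: str = Field(..., description="A message to echo")


class EchoAgent(BaseAsyncAgent):  # illustrative agent, not part of the package
    async def receive_event_async(self, event: Event) -> List[Event]:
        if isinstance(event, PingEvent):
            print(f"echo: {event.message}")
        return []  # no follow-up events to dispatch


async def main():
    # Route PingEvent to the echo agent, as the example above does with add_route.
    router = Router()
    router.add_route(PingEvent, EchoAgent())

    # Start the dispatcher, dispatch one event, wait for the queue to drain, then stop.
    dispatcher = await AsyncDispatcher(router).start()
    dispatcher.dispatch(PingEvent(source=type("ExampleSource", (), {}), message="hello"))
    await dispatcher.wait_for_empty_queue(timeout=5)
    await dispatcher.stop()


if __name__ == "__main__":
    asyncio.run(main())
```

Note that agents never call each other directly: they only return events, and the Router decides which agents see them next.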
_examples/async_llm_example.py (new file)
@@ -0,0 +1,236 @@

"""
Example script demonstrating how to use the AsyncDispatcher with BaseAsyncLLMAgent.

This script shows how to create and use asynchronous LLM agents with the AsyncDispatcher.
"""

import asyncio
from typing import List, Optional

from pydantic import BaseModel, Field

from mojentic.agents.async_aggregator_agent import AsyncAggregatorAgent
from mojentic.agents.async_llm_agent import BaseAsyncLLMAgent
from mojentic.async_dispatcher import AsyncDispatcher
from mojentic.context.shared_working_memory import SharedWorkingMemory
from mojentic.event import Event
from mojentic.llm import LLMBroker
from mojentic.router import Router


# Define some example events
class QuestionEvent(Event):
    question: str = Field(..., description="The question to answer")


class FactCheckEvent(Event):
    question: str = Field(..., description="The original question")
    facts: List[str] = Field(..., description="The facts related to the question")


class AnswerEvent(Event):
    question: str = Field(..., description="The original question")
    answer: str = Field(..., description="The answer to the question")
    confidence: float = Field(..., description="The confidence level of the answer (0-1)")


class FinalAnswerEvent(Event):
    question: str = Field(..., description="The original question")
    answer: str = Field(..., description="The final answer to the question")
    facts: List[str] = Field(..., description="The facts used to answer the question")
    confidence: float = Field(..., description="The confidence level of the answer (0-1)")


# Define response models for LLM agents
class FactCheckResponse(BaseModel):
    facts: List[str] = Field(..., description="The facts related to the question")


class AnswerResponse(BaseModel):
    answer: str = Field(..., description="The answer to the question")
    confidence: float = Field(..., description="The confidence level of the answer (0-1)")


# Define some example agents
class FactCheckerAgent(BaseAsyncLLMAgent):
    """
    An agent that checks facts related to a question.
    """
    def __init__(self, llm: LLMBroker):
        super().__init__(
            llm=llm,
            behaviour="You are a fact-checking assistant. Your job is to provide relevant facts about a question.",
            response_model=FactCheckResponse
        )

    async def receive_event_async(self, event: Event) -> List[Event]:
        if isinstance(event, QuestionEvent):
            prompt = f"Please provide relevant facts about the following question: {event.question}"
            response = await self.generate_response(prompt)
            return [FactCheckEvent(
                source=type(self),
                correlation_id=event.correlation_id,
                question=event.question,
                facts=response.facts
            )]
        return []


class AnswerGeneratorAgent(BaseAsyncLLMAgent):
    """
    An agent that generates an answer to a question.
    """
    def __init__(self, llm: LLMBroker):
        super().__init__(
            llm=llm,
            behaviour="You are a question-answering assistant. Your job is to provide accurate answers to questions.",
            response_model=AnswerResponse
        )

    async def receive_event_async(self, event: Event) -> List[Event]:
        if isinstance(event, QuestionEvent):
            prompt = f"Please answer the following question: {event.question}"
            response = await self.generate_response(prompt)
            return [AnswerEvent(
                source=type(self),
                correlation_id=event.correlation_id,
                question=event.question,
                answer=response.answer,
                confidence=response.confidence
            )]
        return []


class FinalAnswerAgent(AsyncAggregatorAgent):
    """
    An agent that combines facts and answers to produce a final answer.
    """
    def __init__(self, llm: LLMBroker):
        super().__init__(event_types_needed=[FactCheckEvent, AnswerEvent])
        self.llm = llm
        self.final_answer_event = None

    async def receive_event_async(self, event: Event) -> list:
        print(f"FinalAnswerAgent received event: {type(event).__name__}")
        result = await super().receive_event_async(event)
        # Store any FinalAnswerEvent created
        for e in result:
            if isinstance(e, FinalAnswerEvent):
                self.final_answer_event = e
        return result

    async def process_events(self, events):
        print(f"FinalAnswerAgent processing events: {[type(e).__name__ for e in events]}")
        # Extract the events
        fact_check_event = next((e for e in events if isinstance(e, FactCheckEvent)), None)
        answer_event = next((e for e in events if isinstance(e, AnswerEvent)), None)

        if fact_check_event and answer_event:
            print("FinalAnswerAgent has both FactCheckEvent and AnswerEvent")
            # In a real implementation, we might use the LLM to refine the answer based on the facts
            # For this example, we'll just combine them

            # Adjust confidence based on facts
            confidence = answer_event.confidence
            if len(fact_check_event.facts) > 0:
                # Increase confidence if we have facts
                confidence = min(1.0, confidence + 0.1)

            final_answer_event = FinalAnswerEvent(
                source=type(self),
                correlation_id=fact_check_event.correlation_id,
                question=fact_check_event.question,
                answer=answer_event.answer,
                facts=fact_check_event.facts,
                confidence=confidence
            )
            print(f"FinalAnswerAgent created FinalAnswerEvent: {final_answer_event}")
            self.final_answer_event = final_answer_event
            return [final_answer_event]
        print("FinalAnswerAgent missing either FactCheckEvent or AnswerEvent")
        return []

    async def get_final_answer(self, correlation_id, timeout=30):
        """
        Get the final answer for a specific correlation_id.

        Parameters
        ----------
        correlation_id : str
            The correlation_id to get the final answer for
        timeout : float, optional
            The timeout in seconds

        Returns
        -------
        FinalAnswerEvent or None
            The final answer event, or None if not found
        """
        # First wait for all needed events
        await self.wait_for_events(correlation_id, timeout)

        # Then check if we have a final answer
        if self.final_answer_event and self.final_answer_event.correlation_id == correlation_id:
            return self.final_answer_event

        return None


async def main():
    """
    Main function that demonstrates the usage of AsyncDispatcher with async LLM agents.
    """
    # Initialize the LLM broker with your preferred model
    llm = LLMBroker(model="qwen3:30b-a3b-q4_K_M")

    # Create a router and register agents
    router = Router()

    # Create agents
    fact_checker = FactCheckerAgent(llm)
    answer_generator = AnswerGeneratorAgent(llm)
    final_answer_agent = FinalAnswerAgent(llm)

    # Register agents with the router
    router.add_route(QuestionEvent, fact_checker)
    router.add_route(QuestionEvent, answer_generator)
    router.add_route(QuestionEvent, final_answer_agent)
    router.add_route(FactCheckEvent, final_answer_agent)
    router.add_route(AnswerEvent, final_answer_agent)

    # Create and start the dispatcher
    dispatcher = await AsyncDispatcher(router).start()

    # Create a question event
    question = "What is the capital of France?"
    event = QuestionEvent(source=type("ExampleSource", (), {}), question=question)

    # Dispatch the event
    print("Dispatching question event")
    dispatcher.dispatch(event)

    # Give the dispatcher a moment to start processing the event
    print("Waiting for dispatcher to start processing")
    await asyncio.sleep(0.1)

    # Wait for the final answer from the FinalAnswerAgent
    print("Waiting for final answer from FinalAnswerAgent")
    final_answer_event = await final_answer_agent.get_final_answer(event.correlation_id, timeout=30)

    # Print the final answer
    if final_answer_event:
        print(f"Question: {final_answer_event.question}")
        print(f"Answer: {final_answer_event.answer}")
        print(f"Confidence: {final_answer_event.confidence}")
        print("Facts:")
        for fact in final_answer_event.facts:
            print(f"  - {fact}")
    else:
        print("No FinalAnswerEvent found")

    # Stop the dispatcher
    await dispatcher.stop()


if __name__ == "__main__":
    asyncio.run(main())
mojentic/agents/async_aggregator_agent.py (new file)
@@ -0,0 +1,162 @@

import asyncio
import structlog

from mojentic.agents.base_async_agent import BaseAsyncAgent
from mojentic.event import Event

logger = structlog.get_logger()


class AsyncAggregatorAgent(BaseAsyncAgent):
    """
    AsyncAggregatorAgent is an asynchronous version of the BaseAggregatingAgent.
    It aggregates events based on their correlation_id and processes them when all required events are available.
    """
    def __init__(self, event_types_needed=None):
        """
        Initialize the AsyncAggregatorAgent.

        Parameters
        ----------
        event_types_needed : list, optional
            List of event types that need to be captured before processing
        """
        super().__init__()
        self.results = {}
        self.event_types_needed = event_types_needed or []
        self.futures = {}  # Maps correlation_id to Future objects

    async def _get_and_reset_results(self, event):
        """
        Get and reset the results for a specific correlation_id.

        Parameters
        ----------
        event : Event
            The event to get results for

        Returns
        -------
        list
            The results for the event
        """
        results = self.results[event.correlation_id]
        self.results[event.correlation_id] = None
        return results

    async def _capture_results_if_needed(self, event):
        """
        Capture results for a specific correlation_id.

        Parameters
        ----------
        event : Event
            The event to capture results for
        """
        results = self.results.get(event.correlation_id, [])
        results.append(event)
        self.results[event.correlation_id] = results

        # Check if we have all needed events and set the future if we do
        event_types_captured = [type(e) for e in self.results.get(event.correlation_id, [])]
        finished = all([event_type in event_types_captured for event_type in self.event_types_needed])

        if finished and event.correlation_id in self.futures:
            future = self.futures[event.correlation_id]
            if not future.done():
                future.set_result(self.results[event.correlation_id])

    async def _has_all_needed(self, event):
        """
        Check if all needed event types have been captured for a specific correlation_id.

        Parameters
        ----------
        event : Event
            The event to check

        Returns
        -------
        bool
            True if all needed event types have been captured, False otherwise
        """
        event_types_captured = [type(e) for e in self.results.get(event.correlation_id, [])]
        finished = all([event_type in event_types_captured for event_type in self.event_types_needed])
        logger.debug(f"Captured: {event_types_captured}, Needed: {self.event_types_needed}, Finished: {finished}")
        return finished

    async def wait_for_events(self, correlation_id, timeout=None):
        """
        Wait for all needed events for a specific correlation_id.

        Parameters
        ----------
        correlation_id : str
            The correlation_id to wait for
        timeout : float, optional
            The timeout in seconds

        Returns
        -------
        list
            The events for the correlation_id
        """
        if correlation_id not in self.futures:
            self.futures[correlation_id] = asyncio.Future()

        # If we already have all needed events, return them
        if correlation_id in self.results:
            event_types_captured = [type(e) for e in self.results.get(correlation_id, [])]
            if all([event_type in event_types_captured for event_type in self.event_types_needed]):
                return self.results[correlation_id]

        # Otherwise, wait for the future to be set
        try:
            return await asyncio.wait_for(self.futures[correlation_id], timeout)
        except asyncio.TimeoutError:
            logger.warning(f"Timeout waiting for events for correlation_id {correlation_id}")
            return self.results.get(correlation_id, [])

    async def receive_event_async(self, event: Event) -> list:
        """
        Receive an event and process it if all needed events are available.

        Parameters
        ----------
        event : Event
            The event to process

        Returns
        -------
        list
            The events to be processed next
        """
        # First capture the event
        await self._capture_results_if_needed(event)

        # Then check if we have all needed events
        event_types_captured = [type(e) for e in self.results.get(event.correlation_id, [])]
        finished = all([event_type in event_types_captured for event_type in self.event_types_needed])

        # If we have all needed events, process them
        if finished:
            return await self.process_events(await self._get_and_reset_results(event))

        return []

    async def process_events(self, events):
        """
        Process a list of events.
        This method should be overridden by subclasses.

        Parameters
        ----------
        events : list
            The events to process

        Returns
        -------
        list
            The events to be processed next
        """
        return []