mojentic 0.6.2__py3-none-any.whl → 0.7.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. _examples/async_dispatcher_example.py +241 -0
  2. _examples/async_llm_example.py +236 -0
  3. _examples/broker_as_tool.py +13 -10
  4. _examples/coding_file_tool.py +170 -77
  5. _examples/file_tool.py +5 -3
  6. mojentic/__init__.py +2 -7
  7. mojentic/agents/__init__.py +11 -2
  8. mojentic/agents/async_aggregator_agent.py +162 -0
  9. mojentic/agents/async_aggregator_agent_spec.py +227 -0
  10. mojentic/agents/async_llm_agent.py +197 -0
  11. mojentic/agents/async_llm_agent_spec.py +166 -0
  12. mojentic/agents/base_async_agent.py +27 -0
  13. mojentic/async_dispatcher.py +134 -0
  14. mojentic/async_dispatcher_spec.py +244 -0
  15. mojentic/context/__init__.py +4 -0
  16. mojentic/llm/__init__.py +14 -2
  17. mojentic/llm/gateways/__init__.py +22 -0
  18. mojentic/llm/gateways/models.py +3 -3
  19. mojentic/llm/gateways/ollama.py +4 -4
  20. mojentic/llm/gateways/openai.py +3 -3
  21. mojentic/llm/gateways/openai_messages_adapter.py +8 -4
  22. mojentic/llm/llm_broker.py +4 -4
  23. mojentic/llm/message_composers.py +1 -1
  24. mojentic/llm/registry/__init__.py +6 -0
  25. mojentic/llm/registry/populate_registry_from_ollama.py +13 -12
  26. mojentic/llm/tools/__init__.py +18 -0
  27. mojentic/llm/tools/date_resolver.py +5 -2
  28. mojentic/llm/tools/ephemeral_task_manager/__init__.py +8 -8
  29. mojentic/llm/tools/file_manager.py +603 -42
  30. mojentic/llm/tools/file_manager_spec.py +723 -0
  31. mojentic/llm/tools/tool_wrapper.py +7 -3
  32. mojentic/tracer/__init__.py +8 -3
  33. {mojentic-0.6.2.dist-info → mojentic-0.7.2.dist-info}/METADATA +4 -2
  34. {mojentic-0.6.2.dist-info → mojentic-0.7.2.dist-info}/RECORD +37 -27
  35. {mojentic-0.6.2.dist-info → mojentic-0.7.2.dist-info}/WHEEL +0 -0
  36. {mojentic-0.6.2.dist-info → mojentic-0.7.2.dist-info}/licenses/LICENSE.md +0 -0
  37. {mojentic-0.6.2.dist-info → mojentic-0.7.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,241 @@
1
+ """
2
+ Example script demonstrating how to use the AsyncDispatcher, BaseAsyncAgent, and AsyncAggregatorAgent.
3
+
4
+ This script shows how to create and use asynchronous agents with the AsyncDispatcher.
5
+ """
6
+
7
+ import asyncio
8
+ from pathlib import Path
9
+ from typing import List
10
+
11
+ from pydantic import BaseModel, Field
12
+
13
+ from mojentic.agents.async_aggregator_agent import AsyncAggregatorAgent
14
+ from mojentic.agents.async_llm_agent import BaseAsyncLLMAgent
15
+ from mojentic.agents.base_async_agent import BaseAsyncAgent
16
+ from mojentic.async_dispatcher import AsyncDispatcher
17
+ from mojentic.event import Event, TerminateEvent
18
+ from mojentic.llm import LLMBroker
19
+ from mojentic.llm.gateways.models import LLMMessage, MessageRole
20
+ from mojentic.router import Router
21
+
22
+
23
# Events exchanged between the example agents.
class TextEvent(Event):
    # Raw input text to be analyzed and summarized.
    text: str = Field(..., description="The text content of the event")


class AnalysisEvent(Event):
    # Produced by TextAnalyzerAgent.
    analysis: str = Field(..., description="The analysis of the text")


class SummaryEvent(Event):
    # Produced by TextSummarizerAgent.
    summary: str = Field(..., description="The summary of the text")


class CombinedResultEvent(Event):
    # Produced by ResultCombinerAgent once all three inputs have arrived.
    text: str = Field(..., description="The original text")
    analysis: str = Field(..., description="The analysis of the text")
    summary: str = Field(..., description="The summary of the text")
    combined: str = Field(..., description="The combined result")
41
+
42
+
43
# Structured response models the LLM is asked to fill in.
class AnalysisResponse(BaseModel):
    analysis: str = Field(..., description="The analysis of the text")


class SummaryResponse(BaseModel):
    summary: str = Field(..., description="The summary of the text")


class CombinedResponse(BaseModel):
    combined: str = Field(..., description="The combined result of the analysis and summary")
54
+
55
+
56
# Example agents built on the async LLM base class.
class TextAnalyzerAgent(BaseAsyncLLMAgent):
    """
    An agent that analyzes text and produces an analysis.
    """

    def __init__(self, llm: LLMBroker):
        super().__init__(
            llm=llm,
            behaviour="You are a text analysis assistant. Your job is to provide a detailed analysis of the given text, including key themes, structure, and notable elements.",
            response_model=AnalysisResponse
        )

    async def receive_event_async(self, event: Event) -> List[Event]:
        """Analyze incoming TextEvents; ignore every other event type."""
        if not isinstance(event, TextEvent):
            return []

        prompt = f"""
        Please analyze the following text in detail. Consider:
        - Main themes and topics
        - Structure and organization
        - Key points and arguments
        - Style and tone
        - Intended audience
        - Any notable or unique elements

        Text to analyze:
        {event.text[:1000]}... (text truncated for brevity)
        """
        response = await self.generate_response(prompt)
        return [AnalysisEvent(source=type(self), correlation_id=event.correlation_id, analysis=response.analysis)]
86
+
87
+
88
class TextSummarizerAgent(BaseAsyncLLMAgent):
    """
    An agent that summarizes text.
    """

    def __init__(self, llm: LLMBroker):
        super().__init__(
            llm=llm,
            behaviour="You are a text summarization assistant. Your job is to provide concise, accurate summaries of texts while preserving the key information and main points.",
            response_model=SummaryResponse
        )

    async def receive_event_async(self, event: Event) -> List[Event]:
        """Summarize incoming TextEvents; ignore every other event type."""
        if not isinstance(event, TextEvent):
            return []

        prompt = f"""
        Please provide a concise summary of the following text. The summary should:
        - Capture the main points and key information
        - Be significantly shorter than the original text
        - Maintain the original meaning and intent
        - Be clear and coherent
        - Exclude unnecessary details

        Text to summarize:
        {event.text[:1000]}... (text truncated for brevity)
        """
        response = await self.generate_response(prompt)
        return [SummaryEvent(source=type(self), correlation_id=event.correlation_id, summary=response.summary)]
116
+
117
+
118
class ResultCombinerAgent(AsyncAggregatorAgent):
    """
    An agent that combines the analysis and summary of a text using LLM.

    Waits (via AsyncAggregatorAgent) until a TextEvent, an AnalysisEvent and a
    SummaryEvent with the same correlation_id have all arrived, then asks the
    LLM to merge them into a single report.
    """

    def __init__(self, llm: LLMBroker):
        super().__init__(event_types_needed=[TextEvent, AnalysisEvent, SummaryEvent])
        self.llm = llm
        self.behaviour = "You are an assistant that combines text analysis and summaries into a comprehensive report."
        self.response_model = CombinedResponse

    async def process_events(self, events):
        """Combine the gathered events into a CombinedResultEvent.

        Returns an empty list until all three required event types are present.
        """
        text_event = next((e for e in events if isinstance(e, TextEvent)), None)
        analysis_event = next((e for e in events if isinstance(e, AnalysisEvent)), None)
        summary_event = next((e for e in events if isinstance(e, SummaryEvent)), None)

        # Guard clause: nothing to do until every required piece has arrived.
        if not (text_event and analysis_event and summary_event):
            return []

        prompt = f"""
        I have analyzed and summarized a text. Please combine these into a comprehensive report.

        Original Text (excerpt): {text_event.text[:300]}... (text truncated for brevity)

        Analysis: {analysis_event.analysis}

        Summary: {summary_event.summary}

        Please create a well-structured, insightful report that integrates the analysis and summary,
        highlighting the most important aspects of the text. The report should provide a comprehensive
        understanding of the text's content, structure, and significance.
        """
        messages = [
            LLMMessage(role=MessageRole.System, content=self.behaviour),
            LLMMessage(role=MessageRole.User, content=prompt)
        ]

        # generate_object is synchronous, so run it off the event loop.
        # (asyncio is imported at module level; the original re-imported it here.)
        response = await asyncio.to_thread(
            self.llm.generate_object,
            messages=messages,
            object_model=self.response_model
        )

        return [CombinedResultEvent(
            source=type(self),
            correlation_id=text_event.correlation_id,
            text=text_event.text,
            analysis=analysis_event.analysis,
            summary=summary_event.summary,
            combined=response.combined
        )]
175
+
176
+
177
class ResultOutputAgent(BaseAsyncAgent):
    """
    An agent that receives the CombinedResultEvent, outputs the result to the user,
    and emits a TerminateEvent to exit the event loop.
    """

    async def receive_event_async(self, event: Event) -> List[Event]:
        """Print the combined report and signal the dispatcher to stop."""
        if not isinstance(event, CombinedResultEvent):
            return []

        print("\n=== FINAL ANSWER ===")
        print(event.combined)
        print("===================\n")

        # TerminateEvent tells the dispatcher loop to shut down.
        return [TerminateEvent(source=type(self), correlation_id=event.correlation_id)]
193
+
194
+
195
async def main():
    """
    Main function that demonstrates the usage of AsyncDispatcher and async agents.

    Wires four agents into a router, dispatches a TextEvent built from the
    repository README, and waits for the pipeline to drain before stopping.
    """
    # Initialize the LLM broker with the same model as in async_llm_example
    llm = LLMBroker(model="qwen3:30b-a3b-q4_K_M")

    # Create a router and register agents
    router = Router()

    # Create agents with LLM
    analyzer = TextAnalyzerAgent(llm)
    summarizer = TextSummarizerAgent(llm)
    combiner = ResultCombinerAgent(llm)
    output_agent = ResultOutputAgent()

    # Register agents with the router
    router.add_route(TextEvent, analyzer)
    router.add_route(TextEvent, summarizer)
    router.add_route(TextEvent, combiner)
    router.add_route(AnalysisEvent, combiner)
    router.add_route(SummaryEvent, combiner)
    router.add_route(CombinedResultEvent, output_agent)

    # Create and start the dispatcher
    dispatcher = await AsyncDispatcher(router).start()

    # Read the README as the example input. An explicit encoding avoids
    # depending on the platform's default (the original used bare open()).
    # NOTE(review): assumes the script is run from two levels below the repo
    # root — confirm against the project layout.
    text = (Path.cwd().parent.parent / "README.md").read_text(encoding="utf-8")
    event = TextEvent(source=type("ExampleSource", (), {}), text=text)

    # Dispatch the event
    dispatcher.dispatch(event)

    # Wait for all events in the queue to be processed
    print("Waiting for all events to be processed...")
    queue_empty = await dispatcher.wait_for_empty_queue(timeout=10)
    if not queue_empty:
        print("Warning: Not all events were processed within the timeout period.")

    # Stop the dispatcher
    await dispatcher.stop()


if __name__ == "__main__":
    asyncio.run(main())
@@ -0,0 +1,236 @@
1
+ """
2
+ Example script demonstrating how to use the AsyncDispatcher with BaseAsyncLLMAgent.
3
+
4
+ This script shows how to create and use asynchronous LLM agents with the AsyncDispatcher.
5
+ """
6
+
7
+ import asyncio
8
+ from typing import List, Optional
9
+
10
+ from pydantic import BaseModel, Field
11
+
12
+ from mojentic.agents.async_aggregator_agent import AsyncAggregatorAgent
13
+ from mojentic.agents.async_llm_agent import BaseAsyncLLMAgent
14
+ from mojentic.async_dispatcher import AsyncDispatcher
15
+ from mojentic.context.shared_working_memory import SharedWorkingMemory
16
+ from mojentic.event import Event
17
+ from mojentic.llm import LLMBroker
18
+ from mojentic.router import Router
19
+
20
+
21
# Events exchanged between the question-answering agents.
class QuestionEvent(Event):
    question: str = Field(..., description="The question to answer")


class FactCheckEvent(Event):
    question: str = Field(..., description="The original question")
    facts: List[str] = Field(..., description="The facts related to the question")


class AnswerEvent(Event):
    question: str = Field(..., description="The original question")
    answer: str = Field(..., description="The answer to the question")
    confidence: float = Field(..., description="The confidence level of the answer (0-1)")


class FinalAnswerEvent(Event):
    question: str = Field(..., description="The original question")
    answer: str = Field(..., description="The final answer to the question")
    facts: List[str] = Field(..., description="The facts used to answer the question")
    confidence: float = Field(..., description="The confidence level of the answer (0-1)")
42
+
43
+
44
# Structured response models the LLM is asked to fill in.
class FactCheckResponse(BaseModel):
    facts: List[str] = Field(..., description="The facts related to the question")


class AnswerResponse(BaseModel):
    answer: str = Field(..., description="The answer to the question")
    confidence: float = Field(..., description="The confidence level of the answer (0-1)")
52
+
53
+
54
# Example agents built on the async LLM base class.
class FactCheckerAgent(BaseAsyncLLMAgent):
    """
    An agent that checks facts related to a question.
    """

    def __init__(self, llm: LLMBroker):
        super().__init__(
            llm=llm,
            behaviour="You are a fact-checking assistant. Your job is to provide relevant facts about a question.",
            response_model=FactCheckResponse
        )

    async def receive_event_async(self, event: Event) -> List[Event]:
        """Gather facts for incoming QuestionEvents; ignore other events."""
        if not isinstance(event, QuestionEvent):
            return []

        prompt = f"Please provide relevant facts about the following question: {event.question}"
        response = await self.generate_response(prompt)
        return [FactCheckEvent(
            source=type(self),
            correlation_id=event.correlation_id,
            question=event.question,
            facts=response.facts
        )]
77
+
78
+
79
class AnswerGeneratorAgent(BaseAsyncLLMAgent):
    """
    An agent that generates an answer to a question.
    """

    def __init__(self, llm: LLMBroker):
        super().__init__(
            llm=llm,
            behaviour="You are a question-answering assistant. Your job is to provide accurate answers to questions.",
            response_model=AnswerResponse
        )

    async def receive_event_async(self, event: Event) -> List[Event]:
        """Answer incoming QuestionEvents; ignore other events."""
        if not isinstance(event, QuestionEvent):
            return []

        prompt = f"Please answer the following question: {event.question}"
        response = await self.generate_response(prompt)
        return [AnswerEvent(
            source=type(self),
            correlation_id=event.correlation_id,
            question=event.question,
            answer=response.answer,
            confidence=response.confidence
        )]
102
+
103
+
104
class FinalAnswerAgent(AsyncAggregatorAgent):
    """
    An agent that combines facts and answers to produce a final answer.

    Results are kept per correlation_id so that several in-flight questions do
    not overwrite each other's final answers (the original kept only the last
    FinalAnswerEvent in a single attribute).
    """

    def __init__(self, llm: LLMBroker):
        super().__init__(event_types_needed=[FactCheckEvent, AnswerEvent])
        self.llm = llm
        # Most recent FinalAnswerEvent, kept for backward compatibility.
        self.final_answer_event = None
        # FinalAnswerEvents keyed by correlation_id.
        self._final_answers = {}

    def _record(self, event):
        # Remember a FinalAnswerEvent both per-correlation and as "latest".
        self.final_answer_event = event
        self._final_answers[event.correlation_id] = event

    async def receive_event_async(self, event: Event) -> list:
        print(f"FinalAnswerAgent received event: {type(event).__name__}")
        result = await super().receive_event_async(event)
        # Store any FinalAnswerEvent created
        for e in result:
            if isinstance(e, FinalAnswerEvent):
                self._record(e)
        return result

    async def process_events(self, events):
        """Merge a FactCheckEvent and an AnswerEvent into a FinalAnswerEvent.

        Returns an empty list until both required events are present.
        """
        print(f"FinalAnswerAgent processing events: {[type(e).__name__ for e in events]}")
        # Extract the events
        fact_check_event = next((e for e in events if isinstance(e, FactCheckEvent)), None)
        answer_event = next((e for e in events if isinstance(e, AnswerEvent)), None)

        if fact_check_event and answer_event:
            print("FinalAnswerAgent has both FactCheckEvent and AnswerEvent")
            # In a real implementation, we might use the LLM to refine the answer based on the facts
            # For this example, we'll just combine them

            # Adjust confidence based on facts
            confidence = answer_event.confidence
            if len(fact_check_event.facts) > 0:
                # Increase confidence if we have facts
                confidence = min(1.0, confidence + 0.1)

            final_answer_event = FinalAnswerEvent(
                source=type(self),
                correlation_id=fact_check_event.correlation_id,
                question=fact_check_event.question,
                answer=answer_event.answer,
                facts=fact_check_event.facts,
                confidence=confidence
            )
            print(f"FinalAnswerAgent created FinalAnswerEvent: {final_answer_event}")
            self._record(final_answer_event)
            return [final_answer_event]
        print("FinalAnswerAgent missing either FactCheckEvent or AnswerEvent")
        return []

    async def get_final_answer(self, correlation_id, timeout=30):
        """
        Get the final answer for a specific correlation_id.

        Parameters
        ----------
        correlation_id : str
            The correlation_id to get the final answer for
        timeout : float, optional
            The timeout in seconds

        Returns
        -------
        FinalAnswerEvent or None
            The final answer event, or None if not found
        """
        # First wait for all needed events
        await self.wait_for_events(correlation_id, timeout)

        # Then check if we have a final answer for this specific question
        answer = self._final_answers.get(correlation_id)
        if answer is not None:
            return answer

        # Fallback preserving the original single-answer behaviour
        if self.final_answer_event and self.final_answer_event.correlation_id == correlation_id:
            return self.final_answer_event

        return None
177
+
178
+
179
async def main():
    """
    Main function that demonstrates the usage of AsyncDispatcher with async LLM agents.
    """
    # Initialize the LLM broker with your preferred model
    llm = LLMBroker(model="qwen3:30b-a3b-q4_K_M")

    # Build the agents that will cooperate on the question
    fact_checker = FactCheckerAgent(llm)
    answer_generator = AnswerGeneratorAgent(llm)
    final_answer_agent = FinalAnswerAgent(llm)

    # Route each event type to the agents interested in it
    router = Router()
    for event_type, agent in [
        (QuestionEvent, fact_checker),
        (QuestionEvent, answer_generator),
        (QuestionEvent, final_answer_agent),
        (FactCheckEvent, final_answer_agent),
        (AnswerEvent, final_answer_agent),
    ]:
        router.add_route(event_type, agent)

    # Create and start the dispatcher
    dispatcher = await AsyncDispatcher(router).start()

    # Build and dispatch the question event
    event = QuestionEvent(source=type("ExampleSource", (), {}), question="What is the capital of France?")
    print("Dispatching question event")
    dispatcher.dispatch(event)

    # Give the dispatcher a moment to start processing the event
    print("Waiting for dispatcher to start processing")
    await asyncio.sleep(0.1)

    # Wait for the final answer from the FinalAnswerAgent
    print("Waiting for final answer from FinalAnswerAgent")
    final_answer_event = await final_answer_agent.get_final_answer(event.correlation_id, timeout=30)

    # Print the final answer
    if final_answer_event:
        print(f"Question: {final_answer_event.question}")
        print(f"Answer: {final_answer_event.answer}")
        print(f"Confidence: {final_answer_event.confidence}")
        print("Facts:")
        for fact in final_answer_event.facts:
            print(f"  - {fact}")
    else:
        print("No FinalAnswerEvent found")

    # Stop the dispatcher
    await dispatcher.stop()


if __name__ == "__main__":
    asyncio.run(main())
@@ -2,7 +2,7 @@ import os
2
2
  from mojentic.agents.base_llm_agent import BaseLLMAgent
3
3
  from mojentic.llm.llm_broker import LLMBroker
4
4
  from mojentic.llm.tools.date_resolver import ResolveDateTool
5
- from mojentic.llm.tools.file_manager import FileManager, ListFilesTool, ReadFileTool, WriteFileTool
5
+ from mojentic.llm.tools.file_manager import FileManager, ListFilesTool, ReadFileTool, WriteFileTool, FilesystemGateway
6
6
  from mojentic.llm.tools.tool_wrapper import ToolWrapper
7
7
 
8
8
  #
@@ -15,19 +15,22 @@ temporal_specialist = BaseLLMAgent(
15
15
  behaviour="You are a historian and sociologist who focuses on sorting out temporal events, determining what happened or will happen when."
16
16
  )
17
17
 
18
+ if not os.path.exists("local"):
19
+ os.mkdir("local")
20
+
21
+ # Create a filesystem gateway for the local directory
22
+ fs = FilesystemGateway(base_path="local")
23
+
18
24
  knowledge_specialist = BaseLLMAgent(
19
25
  llm=LLMBroker(model="llama3.3-70b-32k"),
20
26
  tools=[
21
- ListFilesTool(path="local"),
22
- ReadFileTool(path="local"),
23
- WriteFileTool(path="local"),
27
+ ListFilesTool(fs),
28
+ ReadFileTool(fs),
29
+ WriteFileTool(fs),
24
30
  ],
25
31
  behaviour="You are a knowledge management agent who focuses on sorting out facts and information, able to organize elemental ideas and make connections between them. You can list files to find out where you stored information, read files to review that information, and write files to store that information for later retrieval."
26
32
  )
27
33
 
28
- if not os.path.exists("local"):
29
- os.mkdir("local")
30
-
31
34
 
32
35
 
33
36
  coordinator = BaseLLMAgent(
@@ -42,14 +45,14 @@ coordinator = BaseLLMAgent(
42
45
  result = coordinator.generate_response("""
43
46
 
44
47
  I have several things I need to do this week:
45
-
48
+
46
49
  - On Monday, I need to ensure that I have called Scotiabank and ordered replacement cards for my current, credit, and line of credit accounts.
47
50
  - On Wednesday, I need to drive into Toronto for work. While in Toronto I need to pick up razors. I need to make sure I see Gregg, Britney and Vikram.
48
51
  - On Thursday, I need to ensure I'm up by 7am so that I can be showered and ready for work by 9.
49
52
  - On Friday, I need to ensure that I have my laundry done and my bags packed for my trip to Ottawa.
50
-
53
+
51
54
  Create me a markdown file for each day of the week, named "YYYY-MM-DD-ToDo.md" where the date is the date of that day.
52
55
  Make a list of to-do items in the markdown file, and add a section for the day's daily notes that I can fill out each day.
53
56
  """)
54
57
 
55
- print(result)
58
+ print(result)