mcal-ai-autogen 0.2.0 (tar.gz)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2026 Shiva

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,246 @@
Metadata-Version: 2.4
Name: mcal-ai-autogen
Version: 0.2.0
Summary: Microsoft AutoGen integration for MCAL - Goal-aware memory for multi-agent systems
Author: MCAL Team
License: MIT
Project-URL: Homepage, https://github.com/Shivakoreddi/mcal-ai
Project-URL: Documentation, https://github.com/Shivakoreddi/mcal-ai/tree/main/packages/mcal-autogen
Project-URL: Repository, https://github.com/Shivakoreddi/mcal-ai
Keywords: mcal,autogen,memory,llm,agents,goal-aware,multi-agent
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Requires-Python: >=3.10
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: mcal-ai>=0.1.0
Provides-Extra: autogen
Requires-Dist: autogen-core>=0.4.0; extra == "autogen"
Requires-Dist: autogen-agentchat>=0.4.0; extra == "autogen"
Provides-Extra: dev
Requires-Dist: pytest>=7.0; extra == "dev"
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
Requires-Dist: pytest-cov>=4.0; extra == "dev"
Provides-Extra: all
Requires-Dist: mcal-autogen[autogen,dev]; extra == "all"
Dynamic: license-file

# mcal-autogen

Microsoft AutoGen integration for MCAL (Multi-turn Conversation Abstraction Layer), bringing goal-aware memory to AutoGen agents.

## Installation

```bash
pip install mcal-autogen

# With AutoGen dependencies
pip install mcal-autogen[autogen]
```

## Quick Start

```python
from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.openai import OpenAIChatCompletionClient
from mcal import MCAL
from mcal_autogen import MCALMemory

# Initialize MCAL with your project goal
mcal = MCAL(goal="Help users build data pipelines")

# Create MCAL-backed memory
memory = MCALMemory(mcal, user_id="user_123")

# Create an agent with MCAL memory
model_client = OpenAIChatCompletionClient(model="gpt-4")
agent = AssistantAgent(
    name="data_engineer",
    model_client=model_client,
    memory=[memory],
    system_message="You are a helpful data engineering assistant.",
)

# Use the agent - MCAL automatically tracks context and decisions
result = await agent.run(task="How should I set up my ETL pipeline?")
```

## Features

### Goal-Aware Memory

MCAL's unique value is understanding your project's goals and maintaining context across conversations:

```python
# Initialize with a clear goal
mcal = MCAL(goal="Build a real-time fraud detection system")
memory = MCALMemory(mcal)

# Add relevant context
from autogen_core.memory import MemoryContent
await memory.add(MemoryContent(
    content="We decided to use Kafka for streaming",
    mime_type="text/plain",
    metadata={"category": "architecture", "decision": True}
))

# Query returns goal-relevant results
results = await memory.query("What messaging system should I use?")
# Returns Kafka decision with goal-relevance scoring
```

### Decision Tracking

Track architectural and project decisions automatically:

```python
memory = MCALMemory(
    mcal,
    enable_goal_tracking=True,  # Extract goals from content
    include_decisions=True,     # Include decisions in search
)

# Decisions are automatically tracked
await memory.add(MemoryContent(
    content="After evaluating options, we chose PostgreSQL for its JSON support",
    mime_type="text/plain"
))

# Query finds relevant decisions
results = await memory.query("database selection")
```

### User Isolation

Support multi-tenant scenarios with user isolation:

```python
# Create separate memories for different users
user1_memory = MCALMemory(mcal, user_id="alice")
user2_memory = MCALMemory(mcal, user_id="bob")

# Each user has isolated memory
await user1_memory.add(MemoryContent(content="Alice prefers Python"))
await user2_memory.add(MemoryContent(content="Bob prefers Rust"))

# Queries only return user-specific results
results = await user1_memory.query("language preference")
# Only returns Alice's preference
```

### TTL Support

Configure time-to-live for memory entries:

```python
memory = MCALMemory(mcal, default_ttl_minutes=60)  # 1 hour default

# Or per-entry TTL via metadata
await memory.add(MemoryContent(
    content="Temporary context",
    mime_type="text/plain",
    metadata={"ttl_minutes": 15}  # 15 minute TTL
))
```

## Integration with AutoGen Features

### With AssistantAgent

```python
from autogen_agentchat.agents import AssistantAgent

agent = AssistantAgent(
    name="assistant",
    model_client=model_client,
    memory=[memory],  # MCAL memory integrates seamlessly
)
```

### With Teams

```python
from autogen_agentchat.teams import RoundRobinGroupChat

# Share MCAL memory across team members
shared_memory = MCALMemory(mcal, user_id="team_alpha")

coder = AssistantAgent("coder", model_client=model_client, memory=[shared_memory])
reviewer = AssistantAgent("reviewer", model_client=model_client, memory=[shared_memory])

team = RoundRobinGroupChat([coder, reviewer])
```

### Context Window Management

MCAL automatically manages context relevance:

```python
memory = MCALMemory(
    mcal,
    max_results=10,       # Limit results per query
    score_threshold=0.5,  # Minimum relevance score
)

# update_context adds relevant memories to the agent's context
result = await memory.update_context(model_context)
```

## API Reference

### MCALMemory

```python
class MCALMemory(Memory):
    def __init__(
        self,
        mcal: MCAL,
        user_id: str = "default",
        name: str = "mcal_memory",
        max_results: int = 10,
        score_threshold: float = 0.0,
        default_ttl_minutes: Optional[float] = None,
        enable_goal_tracking: bool = True,
        include_decisions: bool = True,
    ):
        """
        Initialize MCAL-backed memory for AutoGen.

        Args:
            mcal: Initialized MCAL instance
            user_id: User identifier for memory isolation
            name: Memory instance name
            max_results: Maximum results to return from queries
            score_threshold: Minimum relevance score (0-1)
            default_ttl_minutes: Default TTL in minutes
            enable_goal_tracking: Extract goals from content
            include_decisions: Include decisions in search results
        """
```

### Key Methods

| Method | Description |
|--------|-------------|
| `add(content)` | Add content to memory |
| `query(query)` | Search for relevant memories |
| `update_context(model_context)` | Update agent context with memories |
| `clear()` | Clear all memory entries |
| `close()` | Cleanup resources |

## Requirements

- Python >= 3.10
- mcal >= 0.1.0
- autogen-core >= 0.4.0 (optional)
- autogen-agentchat >= 0.4.0 (optional)

## License

MIT License
@@ -0,0 +1,213 @@
# mcal-autogen

Microsoft AutoGen integration for MCAL (Multi-turn Conversation Abstraction Layer), bringing goal-aware memory to AutoGen agents.

## Installation

```bash
pip install mcal-autogen

# With AutoGen dependencies
pip install mcal-autogen[autogen]
```

## Quick Start

```python
from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.openai import OpenAIChatCompletionClient
from mcal import MCAL
from mcal_autogen import MCALMemory

# Initialize MCAL with your project goal
mcal = MCAL(goal="Help users build data pipelines")

# Create MCAL-backed memory
memory = MCALMemory(mcal, user_id="user_123")

# Create an agent with MCAL memory
model_client = OpenAIChatCompletionClient(model="gpt-4")
agent = AssistantAgent(
    name="data_engineer",
    model_client=model_client,
    memory=[memory],
    system_message="You are a helpful data engineering assistant.",
)

# Use the agent - MCAL automatically tracks context and decisions
result = await agent.run(task="How should I set up my ETL pipeline?")
```
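The Quick Start uses a top-level `await`, which only works in an already-async context such as a notebook. A minimal sketch of the same flow as a plain script, using nothing beyond the imports above plus the standard library's `asyncio`:

```python
import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.openai import OpenAIChatCompletionClient
from mcal import MCAL
from mcal_autogen import MCALMemory


async def main() -> None:
    # Same setup as the Quick Start, wrapped in a coroutine
    mcal = MCAL(goal="Help users build data pipelines")
    memory = MCALMemory(mcal, user_id="user_123")
    model_client = OpenAIChatCompletionClient(model="gpt-4")
    agent = AssistantAgent(
        name="data_engineer",
        model_client=model_client,
        memory=[memory],
        system_message="You are a helpful data engineering assistant.",
    )
    result = await agent.run(task="How should I set up my ETL pipeline?")
    print(result)


if __name__ == "__main__":
    asyncio.run(main())  # drives the event loop for the async agent call
```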
## Features

### Goal-Aware Memory

MCAL's unique value is understanding your project's goals and maintaining context across conversations:

```python
# Initialize with a clear goal
mcal = MCAL(goal="Build a real-time fraud detection system")
memory = MCALMemory(mcal)

# Add relevant context
from autogen_core.memory import MemoryContent
await memory.add(MemoryContent(
    content="We decided to use Kafka for streaming",
    mime_type="text/plain",
    metadata={"category": "architecture", "decision": True}
))

# Query returns goal-relevant results
results = await memory.query("What messaging system should I use?")
# Returns Kafka decision with goal-relevance scoring
```

### Decision Tracking

Track architectural and project decisions automatically:

```python
memory = MCALMemory(
    mcal,
    enable_goal_tracking=True,  # Extract goals from content
    include_decisions=True,     # Include decisions in search
)

# Decisions are automatically tracked
await memory.add(MemoryContent(
    content="After evaluating options, we chose PostgreSQL for its JSON support",
    mime_type="text/plain"
))

# Query finds relevant decisions
results = await memory.query("database selection")
```

### User Isolation

Support multi-tenant scenarios with user isolation:

```python
# Create separate memories for different users
user1_memory = MCALMemory(mcal, user_id="alice")
user2_memory = MCALMemory(mcal, user_id="bob")

# Each user has isolated memory
await user1_memory.add(MemoryContent(content="Alice prefers Python"))
await user2_memory.add(MemoryContent(content="Bob prefers Rust"))

# Queries only return user-specific results
results = await user1_memory.query("language preference")
# Only returns Alice's preference
```

### TTL Support

Configure time-to-live for memory entries:

```python
memory = MCALMemory(mcal, default_ttl_minutes=60)  # 1 hour default

# Or per-entry TTL via metadata
await memory.add(MemoryContent(
    content="Temporary context",
    mime_type="text/plain",
    metadata={"ttl_minutes": 15}  # 15 minute TTL
))
```

## Integration with AutoGen Features

### With AssistantAgent

```python
from autogen_agentchat.agents import AssistantAgent

agent = AssistantAgent(
    name="assistant",
    model_client=model_client,
    memory=[memory],  # MCAL memory integrates seamlessly
)
```

### With Teams

```python
from autogen_agentchat.teams import RoundRobinGroupChat

# Share MCAL memory across team members
shared_memory = MCALMemory(mcal, user_id="team_alpha")

coder = AssistantAgent("coder", model_client=model_client, memory=[shared_memory])
reviewer = AssistantAgent("reviewer", model_client=model_client, memory=[shared_memory])

team = RoundRobinGroupChat([coder, reviewer])
```
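A short sketch of actually driving this team; it assumes AgentChat's `run(task=...)` entry point on the team object and reuses `team` and the shared memory from the block above (the task string is a placeholder):

```python
async def run_review_round() -> None:
    # Both agents consult the same MCAL-backed memory, so context and
    # decisions recorded by the coder are visible to the reviewer.
    result = await team.run(task="Implement and review a CSV-to-Parquet loader.")
    print(result)
```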
### Context Window Management

MCAL automatically manages context relevance:

```python
memory = MCALMemory(
    mcal,
    max_results=10,       # Limit results per query
    score_threshold=0.5,  # Minimum relevance score
)

# update_context adds relevant memories to the agent's context
result = await memory.update_context(model_context)
```
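The `model_context` argument above is not defined in the snippet; it is whatever chat-completion context the agent maintains. A minimal sketch of calling `update_context` by hand, assuming autogen-core's `BufferedChatCompletionContext` and the `memory` instance configured above:

```python
from autogen_core.model_context import BufferedChatCompletionContext


async def inject_relevant_memories() -> None:
    # A bounded context that keeps only the most recent 10 messages
    model_context = BufferedChatCompletionContext(buffer_size=10)

    # update_context appends goal-relevant memories to that context and
    # returns a result describing what was added.
    result = await memory.update_context(model_context)
    print(result)
```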
## API Reference

### MCALMemory

```python
class MCALMemory(Memory):
    def __init__(
        self,
        mcal: MCAL,
        user_id: str = "default",
        name: str = "mcal_memory",
        max_results: int = 10,
        score_threshold: float = 0.0,
        default_ttl_minutes: Optional[float] = None,
        enable_goal_tracking: bool = True,
        include_decisions: bool = True,
    ):
        """
        Initialize MCAL-backed memory for AutoGen.

        Args:
            mcal: Initialized MCAL instance
            user_id: User identifier for memory isolation
            name: Memory instance name
            max_results: Maximum results to return from queries
            score_threshold: Minimum relevance score (0-1)
            default_ttl_minutes: Default TTL in minutes
            enable_goal_tracking: Extract goals from content
            include_decisions: Include decisions in search results
        """
```
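To connect these parameters with the methods listed below, here is a minimal lifecycle sketch; the goal, user id, and stored text are placeholder values, and the cleanup calls assume the async `Memory` interface that `MCALMemory` subclasses:

```python
from autogen_core.memory import MemoryContent
from mcal import MCAL
from mcal_autogen import MCALMemory


async def memory_lifecycle() -> None:
    mcal = MCAL(goal="Ship a reporting dashboard")  # placeholder goal
    memory = MCALMemory(
        mcal,
        user_id="demo_user",
        max_results=5,
        score_threshold=0.3,
        default_ttl_minutes=120,
    )

    # Store a fact, then retrieve it with a goal-aware query
    await memory.add(MemoryContent(
        content="All reports use UTC timestamps",
        mime_type="text/plain",
    ))
    results = await memory.query("timezone conventions")
    print(results)

    # Drop stored entries and release underlying resources when finished
    await memory.clear()
    await memory.close()
```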
### Key Methods

| Method | Description |
|--------|-------------|
| `add(content)` | Add content to memory |
| `query(query)` | Search for relevant memories |
| `update_context(model_context)` | Update agent context with memories |
| `clear()` | Clear all memory entries |
| `close()` | Cleanup resources |

## Requirements

- Python >= 3.10
- mcal >= 0.1.0
- autogen-core >= 0.4.0 (optional)
- autogen-agentchat >= 0.4.0 (optional)

## License

MIT License
@@ -0,0 +1,63 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "mcal-ai-autogen"
version = "0.2.0"
description = "Microsoft AutoGen integration for MCAL - Goal-aware memory for multi-agent systems"
readme = "README.md"
license = {text = "MIT"}
requires-python = ">=3.10"
authors = [
    {name = "MCAL Team"}
]
keywords = [
    "mcal",
    "autogen",
    "memory",
    "llm",
    "agents",
    "goal-aware",
    "multi-agent"
]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
]

dependencies = [
    "mcal-ai>=0.1.0",
]

[project.optional-dependencies]
autogen = [
    "autogen-core>=0.4.0",
    "autogen-agentchat>=0.4.0",
]
dev = [
    "pytest>=7.0",
    "pytest-asyncio>=0.21.0",
    "pytest-cov>=4.0",
]
all = [
    "mcal-autogen[autogen,dev]",
]

[project.urls]
Homepage = "https://github.com/Shivakoreddi/mcal-ai"
Documentation = "https://github.com/Shivakoreddi/mcal-ai/tree/main/packages/mcal-autogen"
Repository = "https://github.com/Shivakoreddi/mcal-ai"

[tool.setuptools.packages.find]
where = ["src"]

[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]
@@ -0,0 +1,4 @@
[egg_info]
tag_build =
tag_date = 0