mmar_carl-0.0.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mmar-carl might be problematic.
- mmar_carl/__init__.py +25 -0
- mmar_carl/chain.py +344 -0
- mmar_carl/executor.py +343 -0
- mmar_carl/llm.py +51 -0
- mmar_carl/models.py +219 -0
- mmar_carl-0.0.3.dist-info/METADATA +29 -0
- mmar_carl-0.0.3.dist-info/RECORD +9 -0
- mmar_carl-0.0.3.dist-info/WHEEL +4 -0
- mmar_carl-0.0.3.dist-info/licenses/LICENSE +21 -0
mmar_carl/__init__.py
ADDED
@@ -0,0 +1,25 @@
"""
MAESTRO CARL (Collaborative Agent Reasoning Library)

A library for building chain-of-thought reasoning systems with DAG-based parallel execution.
"""

from .chain import ChainBuilder, ReasoningChain
from .executor import DAGExecutor
from .llm import LLMClient, LLMClientBase
from .models import Language, PromptTemplate, ReasoningContext, ReasoningResult, StepDescription, StepExecutionResult

__version__ = "0.0.3"
__all__ = [
    "Language",
    "StepDescription",
    "ReasoningContext",
    "StepExecutionResult",
    "ReasoningResult",
    "PromptTemplate",
    "ReasoningChain",
    "ChainBuilder",
    "DAGExecutor",
    "LLMClientBase",
    "LLMClient",
]
mmar_carl/chain.py
ADDED
@@ -0,0 +1,344 @@
"""
Main chain definition and execution API for CARL.

Provides the primary interface for defining and executing reasoning chains.
"""

import asyncio
from typing import Any

from .executor import DAGExecutor
from .models import PromptTemplate, ReasoningContext, ReasoningResult, StepDescription


class ReasoningChain:
    """
    Main interface for defining and executing reasoning chains.

    Provides a high-level API that combines chain definition with DAG execution.
    """

    def __init__(
        self,
        steps: list[StepDescription],
        max_workers: int = 3,
        prompt_template: PromptTemplate | None = None,
        enable_progress: bool = False,
        metadata: dict[str, Any] | None = None,
    ):
        self.steps = steps
        self.max_workers = max_workers
        self.prompt_template = prompt_template or PromptTemplate()
        self.enable_progress = enable_progress
        self.metadata = metadata or {}
        self._validate_steps()
        self.executor = DAGExecutor(
            max_workers=max_workers, prompt_template=self.prompt_template, enable_progress=enable_progress
        )

    def _validate_steps(self) -> None:
        if not self.steps:
            raise ValueError("Reasoning chain must have at least one step")
        step_numbers = [step.number for step in self.steps]

        # Check for duplicate step numbers
        if len(step_numbers) != len(set(step_numbers)):
            duplicates = [num for num in step_numbers if step_numbers.count(num) > 1]
            raise ValueError(f"Duplicate step numbers found: {duplicates}")

        # Check for missing dependencies
        for step in self.steps:
            for dep in step.dependencies:
                if dep not in step_numbers:
                    raise ValueError(f"Step {step.number} depends on non-existent step {dep}")

        # Check for cycles (basic validation)
        self._check_for_cycles()

    def _check_for_cycles(self) -> None:
        """
        Basic cycle detection using the dependency graph.

        Raises:
            ValueError: If cycles are detected
        """
        visited = set()
        rec_stack = set()

        def visit(step_num: int) -> bool:
            if step_num in rec_stack:
                return True  # Cycle detected
            if step_num in visited:
                return False

            visited.add(step_num)
            rec_stack.add(step_num)

            # Visit dependencies
            step = next(s for s in self.steps if s.number == step_num)
            for dep in step.dependencies:
                if visit(dep):
                    return True

            rec_stack.remove(step_num)
            return False

        for step in self.steps:
            if step.number not in visited:
                if visit(step.number):
                    raise ValueError(f"Cycle detected involving step {step.number}")

    async def execute_async(self, context: ReasoningContext) -> ReasoningResult:
        """
        Execute the reasoning chain asynchronously.

        Args:
            context: Reasoning context with input data and LLM client

        Returns:
            Complete reasoning result
        """
        # Add chain metadata to context
        context.metadata.update({"chain_steps": len(self.steps), "chain_metadata": self.metadata})

        return await self.executor.execute(self.steps, context)

    def execute(self, context: ReasoningContext) -> ReasoningResult:
        """
        Execute the reasoning chain synchronously.

        Args:
            context: Reasoning context with input data and LLM client

        Returns:
            Complete reasoning result
        """
        try:
            # Check if we're already in an event loop
            _ = asyncio.get_running_loop()
        except RuntimeError:
            # No event loop running, safe to use asyncio.run
            return asyncio.run(self.execute_async(context))
        else:
            # Already inside an event loop: run the coroutine in a fresh
            # event loop on a worker thread to avoid blocking the current one
            import concurrent.futures

            with concurrent.futures.ThreadPoolExecutor() as executor:
                future = executor.submit(asyncio.run, self.execute_async(context))
                return future.result()

    def get_execution_plan(self) -> dict[str, Any]:
        """
        Get the execution plan showing parallelization opportunities.

        Returns:
            Dictionary describing the execution plan
        """
        # Build dependency levels
        levels = []
        remaining_steps = self.steps.copy()

        while remaining_steps:
            # Snapshot the unassigned step numbers before the pass, so a step
            # whose dependency is placed in this same pass still lands on the
            # next level instead of being merged into the current one.
            remaining_numbers = {s.number for s in remaining_steps}
            current_level = [
                step for step in remaining_steps if all(dep not in remaining_numbers for dep in step.dependencies)
            ]
            for step in current_level:
                remaining_steps.remove(step)

            if current_level:
                levels.append(
                    {
                        "level": len(levels) + 1,
                        "steps": [step.number for step in current_level],
                        "parallelizable": len(current_level) > 1,
                        "step_titles": [step.title for step in current_level],
                    }
                )

        return {
            "total_steps": len(self.steps),
            "max_workers": self.max_workers,
            "execution_levels": levels,
            "estimated_parallel_batches": len(levels),
            "parallelization_ratio": (
                len([s for level in levels for s in level["steps"] if level["parallelizable"]]) / len(self.steps)
                if self.steps
                else 0
            ),
        }

    def get_step_dependencies(self) -> dict[int, list[int]]:
        """
        Get a mapping of step dependencies.

        Returns:
            Dictionary mapping step numbers to their dependencies
        """
        return {step.number: step.dependencies.copy() for step in self.steps}

    def get_steps_summary(self) -> list[dict[str, Any]]:
        """
        Get a summary of all steps in the chain.

        Returns:
            List of step summaries
        """
        return [
            {
                "number": step.number,
                "title": step.title,
                "aim": step.aim,
                "dependencies": step.dependencies,
                "entities": step.entities,
                "has_dependencies": step.has_dependencies(),
            }
            for step in self.steps
        ]


class ChainBuilder:
    """
    Builder pattern for constructing reasoning chains.

    Provides a fluent interface for building complex reasoning chains.
    """

    def __init__(self):
        """Initialize the chain builder."""
        self.steps: list[StepDescription] = []
        self.max_workers: int = 3
        self.prompt_template: PromptTemplate | None = None
        self.enable_progress: bool = False
        self.metadata: dict[str, Any] = {}

    def add_step(
        self,
        number: int,
        title: str,
        aim: str,
        reasoning_questions: str,
        stage_action: str,
        example_reasoning: str,
        dependencies: list[int] | None = None,
        entities: list[str] | None = None,
    ) -> "ChainBuilder":
        """
        Add a step to the chain.

        Args:
            number: Step number
            title: Step title
            aim: Step objective
            reasoning_questions: Key questions to answer
            stage_action: Action to perform
            example_reasoning: Example of expert reasoning
            dependencies: List of step numbers this depends on
            entities: Entities/concepts this works with

        Returns:
            Self for method chaining
        """
        step = StepDescription(
            number=number,
            title=title,
            aim=aim,
            reasoning_questions=reasoning_questions,
            stage_action=stage_action,
            example_reasoning=example_reasoning,
            dependencies=dependencies or [],
            entities=entities or [],
        )
        self.steps.append(step)
        return self

    def with_max_workers(self, max_workers: int) -> "ChainBuilder":
        """
        Set the maximum number of parallel workers.

        Args:
            max_workers: Maximum workers

        Returns:
            Self for method chaining
        """
        self.max_workers = max_workers
        return self

    def with_prompt_template(self, template: PromptTemplate) -> "ChainBuilder":
        """
        Set a custom prompt template.

        Args:
            template: Prompt template to use

        Returns:
            Self for method chaining
        """
        self.prompt_template = template
        return self

    def with_progress(self, enable: bool = True) -> "ChainBuilder":
        """
        Enable or disable progress tracking.

        Args:
            enable: Whether to enable progress

        Returns:
            Self for method chaining
        """
        self.enable_progress = enable
        return self

    def with_metadata(self, **metadata) -> "ChainBuilder":
        """
        Add metadata to the chain.

        Args:
            **metadata: Metadata key-value pairs

        Returns:
            Self for method chaining
        """
        self.metadata.update(metadata)
        return self

    def build(self) -> ReasoningChain:
        """
        Build the reasoning chain.

        Returns:
            Constructed reasoning chain

        Raises:
            ValueError: If chain configuration is invalid
        """
        return ReasoningChain(
            steps=self.steps,
            max_workers=self.max_workers,
            prompt_template=self.prompt_template,
            enable_progress=self.enable_progress,
            metadata=self.metadata,
        )


def create_chain_from_config(config: dict[str, Any]) -> ReasoningChain:
    """
    Create a reasoning chain from a configuration dictionary.

    Args:
        config: Configuration dictionary

    Returns:
        Constructed reasoning chain
    """
    steps = []
    for step_config in config.get("steps", []):
        step = StepDescription(**step_config)
        steps.append(step)

    return ReasoningChain(
        steps=steps,
        max_workers=config.get("max_workers", 3),
        enable_progress=config.get("enable_progress", False),
        metadata=config.get("metadata", {}),
    )
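
Editor's usage sketch (not part of the package): a minimal example of how ChainBuilder, ReasoningChain, and an LLMClientBase implementation fit together. The step contents and the EchoClient stub are illustrative assumptions; any client implementing LLMClientBase works the same way.

from mmar_carl import ChainBuilder, Language, LLMClientBase, ReasoningContext


class EchoClient(LLMClientBase):
    """Hypothetical stand-in that answers without calling a real LLM."""

    async def get_response(self, prompt: str) -> str:
        return f"(stub answer to a {len(prompt)}-char prompt)"

    async def get_response_with_retries(self, prompt: str, retries: int = 3) -> str:
        return await self.get_response(prompt)


chain = (
    ChainBuilder()
    .add_step(
        number=1,
        title="Summarize data",
        aim="Summarize the input",
        reasoning_questions="What stands out?",
        stage_action="Write a one-paragraph summary",
        example_reasoning="The data shows ...",
    )
    .add_step(
        number=2,
        title="Draw conclusions",
        aim="Conclude from the summary",
        reasoning_questions="What follows?",
        stage_action="State the main conclusion",
        example_reasoning="Therefore ...",
        dependencies=[1],  # step 2 waits for step 1
    )
    .with_max_workers(2)
    .build()
)

context = ReasoningContext(
    outer_context="col_a,col_b\n1,2",  # any text/CSV payload
    llm_client=EchoClient(),
    language=Language.ENGLISH,
)
result = chain.execute(context)  # synchronous wrapper around execute_async
print(result.get_final_output())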
mmar_carl/executor.py
ADDED
@@ -0,0 +1,343 @@
"""
DAG execution engine for CARL reasoning chains.

Handles parallel execution of reasoning steps based on their dependencies.
"""

import asyncio
import time
from dataclasses import dataclass
from typing import Any

from .models import Language, PromptTemplate, ReasoningContext, ReasoningResult, StepDescription, StepExecutionResult


@dataclass
class ExecutionNode:
    """
    Represents a node in the execution DAG.
    """

    step: StepDescription
    dependencies: list["ExecutionNode"]
    dependents: list["ExecutionNode"]
    executed: bool = False
    executing: bool = False
    result: StepExecutionResult | None = None

    def __post_init__(self):
        if self.dependencies is None:
            self.dependencies = []
        if self.dependents is None:
            self.dependents = []

    def can_execute(self) -> bool:
        """Check if this node can be executed (all dependencies completed)."""
        return all(dep.executed for dep in self.dependencies)

    def is_ready(self) -> bool:
        """Check if this node is ready for execution (not executing or executed)."""
        return not self.executing and not self.executed and self.can_execute()


class DAGExecutor:
    """
    Executes reasoning chains as a Directed Acyclic Graph (DAG).

    Automatically parallelizes execution where dependencies allow.
    """

    def __init__(
        self,
        max_workers: int = 3,
        prompt_template: PromptTemplate | None = None,
        enable_progress: bool = False,
    ):
        """
        Initialize the DAG executor.

        Args:
            max_workers: Maximum number of parallel executions
            prompt_template: Template for generating prompts
            enable_progress: Whether to enable progress tracking
        """
        self.max_workers = max_workers
        self.prompt_template = prompt_template or PromptTemplate()
        self.enable_progress = enable_progress
        self._execution_stats = {
            "total_steps": 0,
            "executed_steps": 0,
            "failed_steps": 0,
            "parallel_batches": 0,
            "total_time": 0.0,
        }

    def build_execution_graph(self, steps: list[StepDescription]) -> list[ExecutionNode]:
        """
        Build an execution graph from step descriptions.

        Args:
            steps: List of step descriptions

        Returns:
            List of execution nodes forming the DAG
        """
        if not steps:
            return []

        # Create nodes
        step_map: dict[int, ExecutionNode] = {}
        for step in steps:
            node = ExecutionNode(step=step, dependencies=[], dependents=[])
            step_map[step.number] = node

        # Build dependencies
        for step in steps:
            node = step_map[step.number]
            for dep_number in step.dependencies:
                if dep_number in step_map:
                    dependency_node = step_map[dep_number]
                    node.dependencies.append(dependency_node)
                    dependency_node.dependents.append(node)
                else:
                    raise ValueError(f"Step {step.number} depends on non-existent step {dep_number}")

        # Validate no cycles
        self._validate_no_cycles(step_map)

        return list(step_map.values())

    def _validate_no_cycles(self, nodes: dict[int, ExecutionNode]) -> None:
        """
        Validate that the execution graph has no cycles.

        Args:
            nodes: Dictionary of execution nodes

        Raises:
            ValueError: If cycles are detected
        """
        visited = set()
        rec_stack = set()

        def has_cycle(node_num: int) -> bool:
            visited.add(node_num)
            rec_stack.add(node_num)

            node = nodes[node_num]
            for dep in node.dependencies:
                if dep.step.number not in visited:
                    if has_cycle(dep.step.number):
                        return True
                elif dep.step.number in rec_stack:
                    return True

            rec_stack.remove(node_num)
            return False

        for node_num in nodes:
            if node_num not in visited:
                if has_cycle(node_num):
                    raise ValueError(f"Cycle detected in execution graph involving step {node_num}")

    async def execute_step(self, node: ExecutionNode, context: ReasoningContext) -> StepExecutionResult:
        """
        Execute a single reasoning step.

        Args:
            node: Execution node to execute
            context: Reasoning context

        Returns:
            Step execution result
        """
        start_time = time.time()
        step = node.step

        try:
            # Generate prompt for this step
            step_prompt = self.prompt_template.format_step_prompt(step, context.language)
            full_prompt = self.prompt_template.format_chain_prompt(
                outer_context=context.outer_context,
                current_task=step_prompt,
                history=context.get_current_history(),
                language=context.language,
            )

            # Execute LLM call with retries
            result = await context.llm_client.get_response_with_retries(prompt=full_prompt, retries=context.retry_max)

            # Update context history
            if context.language == Language.ENGLISH:
                step_result = f"Step {step.number}. {step.title}\nResult: {result}\n"
            else:  # Russian
                step_result = f"Шаг {step.number}. {step.title}\nРезультат: {result}\n"
            updated_history = context.history.copy()
            updated_history.append(step_result)

            execution_time = time.time() - start_time

            return StepExecutionResult(
                step_number=step.number,
                step_title=step.title,
                result=result,
                success=True,
                execution_time=execution_time,
                updated_history=updated_history,
            )

        except Exception as e:
            execution_time = time.time() - start_time

            return StepExecutionResult(
                step_number=step.number,
                step_title=step.title,
                result="",
                success=False,
                error_message=str(e),
                execution_time=execution_time,
                updated_history=context.history.copy(),
            )

    async def execute_batch(
        self, ready_nodes: list[ExecutionNode], context: ReasoningContext
    ) -> list[StepExecutionResult]:
        """
        Execute a batch of ready nodes in parallel.

        Args:
            ready_nodes: List of nodes ready for execution
            context: Reasoning context

        Returns:
            List of step execution results
        """
        if not ready_nodes:
            return []

        # Create independent contexts for parallel execution
        context_snapshots = []
        for _ in ready_nodes:
            snapshot = ReasoningContext(
                outer_context=context.outer_context,
                llm_client=context.llm_client,
                retry_max=context.retry_max,
                history=context.history.copy(),
                metadata=context.metadata.copy(),
                language=context.language,
            )
            context_snapshots.append(snapshot)

        # Execute in parallel
        tasks = [self.execute_step(node, ctx) for node, ctx in zip(ready_nodes, context_snapshots)]

        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Process results and handle exceptions
        processed_results = []
        for i, result in enumerate(results):
            if isinstance(result, Exception):
                processed_results.append(
                    StepExecutionResult(
                        step_number=ready_nodes[i].step.number,
                        step_title=ready_nodes[i].step.title,
                        result="",
                        success=False,
                        error_message=str(result),
                    )
                )
            else:
                processed_results.append(result)

        return processed_results

    async def execute(self, steps: list[StepDescription], context: ReasoningContext) -> ReasoningResult:
        """
        Execute a complete reasoning chain.

        Args:
            steps: List of step descriptions
            context: Initial reasoning context

        Returns:
            Complete reasoning result
        """
        start_time = time.time()
        self._execution_stats["total_steps"] = len(steps)

        # Build execution graph
        nodes = self.build_execution_graph(steps)
        if not nodes:
            return ReasoningResult(
                success=True, history=[], step_results=[], total_execution_time=time.time() - start_time
            )

        # Execute DAG
        executed_nodes: set[int] = set()
        all_results: list[StepExecutionResult] = []
        current_history = context.history.copy()
        batch_count = 0

        while len(executed_nodes) < len(nodes):
            # Find ready nodes
            ready_nodes = [node for node in nodes if node.step.number not in executed_nodes and node.can_execute()]

            if not ready_nodes:
                # This should not happen in a valid DAG
                remaining = [n.step.number for n in nodes if n.step.number not in executed_nodes]
                raise ValueError(f"Deadlock detected: unable to execute steps {remaining}")

            batch_count += 1
            if self.enable_progress:
                print(f"Executing batch {batch_count} with {len(ready_nodes)} steps")

            # Execute batch
            batch_results = await self.execute_batch(ready_nodes, context)
            all_results.extend(batch_results)

            # Update history from successful results.
            # Sort by step number to maintain deterministic order.
            batch_results.sort(key=lambda r: r.step_number)
            seen_steps = set()

            for result in batch_results:
                if result.success and result.step_number not in seen_steps:
                    # Add the latest history entry from this step
                    if result.updated_history:
                        new_entry = result.updated_history[-1]
                        current_history.append(new_entry)
                    seen_steps.add(result.step_number)

            # Mark nodes as executed
            for node in ready_nodes:
                node.executed = True
                executed_nodes.add(node.step.number)

            # Update context with current history
            context.history = current_history.copy()

        # Calculate final stats
        total_time = time.time() - start_time
        successful_steps = [r for r in all_results if r.success]
        failed_steps = [r for r in all_results if not r.success]

        self._execution_stats.update(
            {
                "executed_steps": len(successful_steps),
                "failed_steps": len(failed_steps),
                "parallel_batches": batch_count,
                "total_time": total_time,
            }
        )

        return ReasoningResult(
            success=len(failed_steps) == 0,
            history=current_history,
            step_results=all_results,
            total_execution_time=total_time,
            metadata={"execution_stats": self._execution_stats.copy(), "parallel_batches": batch_count},
        )

    def get_execution_stats(self) -> dict[str, Any]:
        """Get execution statistics from the last run."""
        return self._execution_stats.copy()
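
Editor's sketch (step texts are placeholders): inspecting the DAG directly, without invoking any LLM, to see which steps the executor would run first.

from mmar_carl import DAGExecutor, StepDescription

steps = [
    StepDescription(number=1, title="Load", aim="a", reasoning_questions="q", stage_action="s", example_reasoning="e"),
    StepDescription(number=2, title="Clean", aim="a", reasoning_questions="q", stage_action="s", example_reasoning="e", dependencies=[1]),
    StepDescription(number=3, title="Profile", aim="a", reasoning_questions="q", stage_action="s", example_reasoning="e", dependencies=[1]),
]

executor = DAGExecutor(max_workers=2)
nodes = executor.build_execution_graph(steps)  # raises ValueError on missing deps or cycles

# Only step 1 has no unexecuted dependencies, so it forms the first batch;
# steps 2 and 3 become ready together once step 1 is marked executed, and
# execute_batch() would then run them concurrently.
print([n.step.number for n in nodes if n.is_ready()])  # [1]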
mmar_carl/llm.py
ADDED
@@ -0,0 +1,51 @@
"""
LLM client implementations for CARL using the mmar-llm library.

Provides integration with the mmar-llm library for production use.
"""

from mmar_llm import EntrypointsAccessor

from .models import LLMClientBase


class LLMClient(LLMClientBase):
    """
    LLM client implementation using the mmar-llm library.

    Integrates with the mmar-llm EntrypointsAccessor for LLM calls.
    """

    def __init__(self, entrypoints: EntrypointsAccessor, entrypoint_key: str):
        if not entrypoint_key:
            raise ValueError("entrypoint_key is required and cannot be empty")

        self.entrypoints = entrypoints
        self.entrypoint_key = entrypoint_key

    async def get_response(self, prompt: str) -> str:
        # Get the specific entrypoint
        ep = self.entrypoints[self.entrypoint_key]

        # The underlying method may be sync or async
        result = ep.get_response_with_retries(prompt, retries=1)

        # If the result is awaitable (async), await it
        if hasattr(result, "__await__") or hasattr(result, "__aiter__"):
            return await result
        else:
            # Otherwise it is already a string; return it directly
            return result

    async def get_response_with_retries(self, prompt: str, retries: int = 3) -> str:
        ep = self.entrypoints[self.entrypoint_key]

        # Use mmar-llm's built-in retry functionality
        result = ep.get_response_with_retries(prompt, retries=retries)

        # If the result is awaitable (async), await it
        if hasattr(result, "__await__") or hasattr(result, "__aiter__"):
            return await result
        else:
            # Otherwise it is already a string; return it directly
            return result
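
Editor's wiring sketch (hypothetical): how an LLMClient would be handed to a ReasoningContext. Constructing the EntrypointsAccessor is mmar-llm-specific and omitted here; `entrypoints` is assumed to be an already-configured accessor, and "default" is a placeholder key.

from mmar_carl import LLMClient, ReasoningContext


def make_context(entrypoints, data: str) -> ReasoningContext:
    # "default" is a placeholder; use whatever key your accessor actually exposes
    client = LLMClient(entrypoints=entrypoints, entrypoint_key="default")
    return ReasoningContext(outer_context=data, llm_client=client)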
mmar_carl/models.py
ADDED
@@ -0,0 +1,219 @@
"""
Core data models for the CARL reasoning system.
"""

from abc import ABC, abstractmethod
from enum import StrEnum
from typing import Any

from pydantic import BaseModel, Field


class Language(StrEnum):
    """Supported languages."""

    RUSSIAN = "ru"
    ENGLISH = "en"


class StepDescription(BaseModel):
    """
    Defines a single reasoning step in a chain.

    This model encapsulates all the metadata needed for a reasoning step,
    including its dependencies, objectives, and execution guidance.
    """

    number: int = Field(..., description="Step number in the sequence")
    title: str = Field(..., description="Human-readable title of the step")
    aim: str = Field(..., description="Primary objective of this step")
    reasoning_questions: str = Field(..., description="Key questions to answer")
    dependencies: list[int] = Field(default_factory=list, description="List of step numbers this step depends on")
    entities: list[str] = Field(default_factory=list, description="Entities/concepts this step works with")
    stage_action: str = Field(..., description="Specific action to perform")
    example_reasoning: str = Field(..., description="Example of expert reasoning")

    def depends_on(self, step_number: int) -> bool:
        """Check if this step depends on a given step number."""
        return step_number in self.dependencies

    def has_dependencies(self) -> bool:
        """Check if this step has any dependencies."""
        return len(self.dependencies) > 0


class ReasoningContext(BaseModel):
    """
    Context object that maintains state during reasoning execution.

    Contains the input data, LLM client, execution history, and configuration.
    """

    outer_context: str = Field(..., description="Input data as a string (it can be CSV or other text information)")
    llm_client: Any = Field(..., description="LLM client for execution")
    retry_max: int = Field(default=3, description="Maximum retry attempts")
    history: list[str] = Field(default_factory=list, description="Accumulated reasoning history")
    metadata: dict[str, Any] = Field(default_factory=dict, description="Additional metadata and state")
    language: Language = Field(default=Language.RUSSIAN, description="Language for reasoning prompts")

    def add_to_history(self, entry: str) -> None:
        """Add a new entry to the reasoning history."""
        self.history.append(entry)

    def get_current_history(self) -> str:
        """Get the current reasoning history as a single string."""
        return "\n".join(self.history)

    model_config = {"arbitrary_types_allowed": True}


class StepExecutionResult(BaseModel):
    """
    Result of executing a single reasoning step.
    """

    step_number: int = Field(..., description="Number of the executed step")
    step_title: str = Field(..., description="Title of the executed step")
    result: str = Field(..., description="Result content from the LLM")
    success: bool = Field(..., description="Whether execution succeeded")
    error_message: str | None = Field(default=None, description="Error message if execution failed")
    execution_time: float | None = Field(default=None, description="Time taken for execution in seconds")
    updated_history: list[str] = Field(default_factory=list, description="History after this step's execution")


class ReasoningResult(BaseModel):
    """
    Final result of executing a complete reasoning chain.
    """

    success: bool = Field(..., description="Whether overall execution succeeded")
    history: list[str] = Field(..., description="Complete reasoning history")
    step_results: list[StepExecutionResult] = Field(..., description="Results from each step")
    total_execution_time: float | None = Field(default=None, description="Total execution time in seconds")
    metadata: dict[str, Any] = Field(default_factory=dict, description="Additional result metadata")

    def get_final_output(self) -> str:
        """Get the final reasoning output as a single string."""
        return "\n".join(self.history)

    def get_successful_steps(self) -> list[StepExecutionResult]:
        """Get all successfully executed steps."""
        return [step for step in self.step_results if step.success]

    def get_failed_steps(self) -> list[StepExecutionResult]:
        """Get all failed steps."""
        return [step for step in self.step_results if not step.success]


class LLMClientBase(ABC):
    """
    Abstract base class for LLM clients.

    This interface allows CARL to work with different LLM providers
    while maintaining a consistent API.
    """

    @abstractmethod
    async def get_response(self, prompt: str) -> str:
        """
        Get a response from the LLM.

        Args:
            prompt: The prompt to send to the LLM

        Returns:
            The LLM's response as a string

        Raises:
            Exception: If the LLM call fails
        """
        pass

    @abstractmethod
    async def get_response_with_retries(self, prompt: str, retries: int = 3) -> str:
        """
        Get a response from the LLM with retry logic.

        Args:
            prompt: The prompt to send to the LLM
            retries: Maximum number of retry attempts

        Returns:
            The LLM's response as a string

        Raises:
            Exception: If all retry attempts fail
        """
        pass


class PromptTemplate(BaseModel):
    """
    Template for generating prompts from reasoning steps.
    """

    system_prompt: str | None = Field(default=None, description="System-level instructions")

    # Russian templates
    ru_step_template: str = Field(
        default="Шаг {step_number}. {step_title}\nЦель: {aim}\nЗадача: {stage_action}\nВопросы: {reasoning_questions}\nПример рассуждений: {example_reasoning}",
        description="Template for individual step prompts in Russian",
    )
    ru_chain_template: str = Field(
        default="Данные для анализа:\n{outer_context}\n{step_prompt}\nОтвечай кратко, подумай какие можно сделать выводы о результатах. Ответ должен состоять из одного параграфа. Не задавай дополнительных вопросов и не передавай инструкций. Пиши только текстом, без математических формул.",
        description="Template for complete chain prompts in Russian",
    )
    ru_history_template: str = Field(
        default="История предыдущих шагов:\n{history}\nОсновываясь на результатах предыдущих шагов, выполни следующую задачу:\n{current_task}",
        description="Template for including history in prompts in Russian",
    )

    # English templates
    en_step_template: str = Field(
        default="Step {step_number}. {step_title}\nObjective: {aim}\nTask: {stage_action}\nQuestions: {reasoning_questions}\nExample reasoning: {example_reasoning}",
        description="Template for individual step prompts in English",
    )
    en_chain_template: str = Field(
        default="Data for analysis:\n{outer_context}\n{step_prompt}\nRespond concisely, consider what conclusions can be drawn from the results. Response should be one paragraph. Do not ask additional questions or provide instructions. Write in text only, without mathematical formulas.",
        description="Template for complete chain prompts in English",
    )
    en_history_template: str = Field(
        default="History of previous steps:\n{history}\nBased on the results of previous steps, perform the following task:\n{current_task}",
        description="Template for including history in prompts in English",
    )

    def format_step_prompt(self, step: StepDescription, language: Language = Language.RUSSIAN) -> str:
        """Format a single step prompt."""
        if language == Language.ENGLISH:
            return self.en_step_template.format(
                step_number=step.number,
                step_title=step.title,
                aim=step.aim,
                stage_action=step.stage_action,
                reasoning_questions=step.reasoning_questions,
                example_reasoning=step.example_reasoning,
            )
        else:  # Russian
            return self.ru_step_template.format(
                step_number=step.number,
                step_title=step.title,
                aim=step.aim,
                stage_action=step.stage_action,
                reasoning_questions=step.reasoning_questions,
                example_reasoning=step.example_reasoning,
            )

    def format_chain_prompt(
        self, outer_context: str, current_task: str, history: str = "", language: Language = Language.RUSSIAN
    ) -> str:
        """Format a complete chain prompt."""
        if language == Language.ENGLISH:
            if history:
                current_task = self.en_history_template.format(history=history, current_task=current_task)

            return self.en_chain_template.format(outer_context=outer_context, step_prompt=current_task)
        else:  # Russian
            if history:
                current_task = self.ru_history_template.format(history=history, current_task=current_task)

            return self.ru_chain_template.format(outer_context=outer_context, step_prompt=current_task)
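
Editor's sketch (placeholder step contents): how the executor composes a prompt from these templates. format_step_prompt renders the step, then format_chain_prompt wraps it with the input data and any accumulated history.

from mmar_carl import Language, PromptTemplate, StepDescription

step = StepDescription(
    number=1,
    title="Inspect columns",
    aim="Understand the schema",
    reasoning_questions="Which columns matter?",
    stage_action="List the relevant columns",
    example_reasoning="Column col_a looks like an identifier ...",
)

template = PromptTemplate()
step_prompt = template.format_step_prompt(step, language=Language.ENGLISH)
full_prompt = template.format_chain_prompt(
    outer_context="col_a,col_b\n1,2",
    current_task=step_prompt,
    history="",  # the executor passes context.get_current_history() here
    language=Language.ENGLISH,
)
print(full_prompt)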
mmar_carl-0.0.3.dist-info/METADATA
ADDED
@@ -0,0 +1,29 @@
Metadata-Version: 2.4
Name: mmar-carl
Version: 0.0.3
Summary: Collaborative Agent Reasoning Library
Keywords:
Author: glazkov, shaposhnikov, tagin
Author-email: glazkov <glazkov@airi.net>, shaposhnikov <shaposhnikov@airi.net>, tagin <tagin@airi.net>
License-Expression: MIT
License-File: LICENSE
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Programming Language :: Python :: 3.14
Classifier: Topic :: Documentation
Classifier: Topic :: Software Development
Classifier: Topic :: Utilities
Classifier: Typing :: Typed
Requires-Dist: mmar-llm>=1.0.6
Requires-Dist: pydantic>=2.12.4
Requires-Python: >=3.12
Description-Content-Type: text/markdown

# mmar-carl

...
mmar_carl-0.0.3.dist-info/RECORD
ADDED
@@ -0,0 +1,9 @@
mmar_carl/__init__.py,sha256=b6ucT35TUvjPgocZ4LE-OZyQ1NrZnRAxA9t7EbKYd1U,669
mmar_carl/chain.py,sha256=KVsfRVIWcf8djV7OabNpIx48jDq_LrIh8hzvKDmvVzc,10483
mmar_carl/executor.py,sha256=Mqm7nxT5kvV2vPd1obgG6kG6QC2bUt1Bmusn-nBDj34,11777
mmar_carl/llm.py,sha256=82W8diXo1XL9RCUBYO5PeX5yzvfP3Zv4YHzJxfE3saA,1701
mmar_carl/models.py,sha256=qV_V2z9DZ5lg3AINdzrujiBwcTH5kOU8Tqw_dBZWEXY,9476
mmar_carl-0.0.3.dist-info/licenses/LICENSE,sha256=2A90w8WjhOgQXnFuUijKJYazaqZ4_NTokYb9Po4y-9k,1061
mmar_carl-0.0.3.dist-info/WHEEL,sha256=eh7sammvW2TypMMMGKgsM83HyA_3qQ5Lgg3ynoecH3M,79
mmar_carl-0.0.3.dist-info/METADATA,sha256=WsieZRYaUbbS54ezVXKzFhxQDsa6N6dVF77HJZnagQo,964
mmar_carl-0.0.3.dist-info/RECORD,,
mmar_carl-0.0.3.dist-info/licenses/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 AIRI

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.