mmar-carl 0.0.3__py3-none-any.whl → 0.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mmar-carl might be problematic. See the registry's advisory page for more details.

mmar_carl/__init__.py CHANGED
@@ -6,10 +6,9 @@ A library for building chain-of-thought reasoning systems with DAG-based paralle
6
6
 
7
7
  from .chain import ChainBuilder, ReasoningChain
8
8
  from .executor import DAGExecutor
9
- from .llm import LLMClient, LLMClientBase
10
9
  from .models import Language, PromptTemplate, ReasoningContext, ReasoningResult, StepDescription, StepExecutionResult
11
10
 
12
- __version__ = "0.0.3"
11
+ __version__ = "0.0.5"
13
12
  __all__ = [
14
13
  "Language",
15
14
  "StepDescription",
@@ -20,6 +19,4 @@ __all__ = [
20
19
  "ReasoningChain",
21
20
  "ChainBuilder",
22
21
  "DAGExecutor",
23
- "LLMClientBase",
24
- "LLMClient",
25
22
  ]
mmar_carl/executor.py CHANGED
@@ -164,8 +164,13 @@ class DAGExecutor:
164
164
  language=context.language,
165
165
  )
166
166
 
167
- # Execute LLM call with retries
168
- result = await context.llm_client.get_response_with_retries(prompt=full_prompt, retries=context.retry_max)
167
+ # Execute LLM call with retries using EntrypointsAccessor
168
+ ep = context.entrypoints[context.entrypoint_key]
169
+ result = ep.get_response_with_retries(full_prompt, retries=context.retry_max)
170
+
171
+ # If the result is a coroutine (async), await it
172
+ if hasattr(result, "__await__") or hasattr(result, "__aiter__"):
173
+ result = await result
169
174
 
170
175
  # Update context history
171
176
  if context.language == Language.ENGLISH:
@@ -220,7 +225,8 @@ class DAGExecutor:
220
225
  for _ in ready_nodes:
221
226
  snapshot = ReasoningContext(
222
227
  outer_context=context.outer_context,
223
- llm_client=context.llm_client,
228
+ entrypoints=context.entrypoints,
229
+ entrypoint_key=context.entrypoint_key,
224
230
  retry_max=context.retry_max,
225
231
  history=context.history.copy(),
226
232
  metadata=context.metadata.copy(),
mmar_carl/models.py CHANGED
@@ -2,7 +2,6 @@
2
2
  Core data models for CARL reasoning system.
3
3
  """
4
4
 
5
- from abc import ABC, abstractmethod
6
5
  from enum import StrEnum
7
6
  from typing import Any
8
7
 
@@ -46,11 +45,12 @@ class ReasoningContext(BaseModel):
46
45
  """
47
46
  Context object that maintains state during reasoning execution.
48
47
 
49
- Contains the input data, LLM client, execution history, and configuration.
48
+ Contains the input data, entrypoints accessor, execution history, and configuration.
50
49
  """
51
50
 
52
51
  outer_context: str = Field(..., description="Input data as string (it can be CSV or other text information)")
53
- llm_client: Any = Field(..., description="LLM client for execution")
52
+ entrypoints: Any = Field(..., description="EntrypointsAccessor for LLM execution")
53
+ entrypoint_key: str = Field(default="default", description="Key for the specific entrypoint to use")
54
54
  retry_max: int = Field(default=3, description="Maximum retry attempts")
55
55
  history: list[str] = Field(default_factory=list, description="Accumulated reasoning history")
56
56
  metadata: dict[str, Any] = Field(default_factory=dict, description="Additional metadata and state")
@@ -105,46 +105,6 @@ class ReasoningResult(BaseModel):
105
105
  return [step for step in self.step_results if not step.success]
106
106
 
107
107
 
108
- class LLMClientBase(ABC):
109
- """
110
- Abstract base class for LLM clients.
111
-
112
- This interface allows CARL to work with different LLM providers
113
- while maintaining a consistent API.
114
- """
115
-
116
- @abstractmethod
117
- async def get_response(self, prompt: str) -> str:
118
- """
119
- Get a response from the LLM.
120
-
121
- Args:
122
- prompt: The prompt to send to the LLM
123
-
124
- Returns:
125
- The LLM's response as a string
126
-
127
- Raises:
128
- Exception: If the LLM call fails
129
- """
130
- pass
131
-
132
- @abstractmethod
133
- async def get_response_with_retries(self, prompt: str, retries: int = 3) -> str:
134
- """
135
- Get a response from the LLM with retry logic.
136
-
137
- Args:
138
- prompt: The prompt to send to the LLM
139
- retries: Maximum number of retry attempts
140
-
141
- Returns:
142
- The LLM's response as a string
143
-
144
- Raises:
145
- Exception: If all retry attempts fail
146
- """
147
- pass
148
108
 
149
109
 
150
110
  class PromptTemplate(BaseModel):
@@ -0,0 +1,224 @@
1
+ Metadata-Version: 2.4
2
+ Name: mmar-carl
3
+ Version: 0.0.5
4
+ Summary: Collaborative Agent Reasoning Library
5
+ Keywords:
6
+ Author: glazkov, shaposhnikov, tagin
7
+ Author-email: glazkov <glazkov@airi.net>, shaposhnikov <shaposhnikov@airi.net>, tagin <tagin@airi.net>
8
+ License-Expression: MIT
9
+ License-File: LICENSE
10
+ Classifier: Development Status :: 4 - Beta
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Programming Language :: Python
13
+ Classifier: Programming Language :: Python :: 3
14
+ Classifier: Programming Language :: Python :: 3 :: Only
15
+ Classifier: Programming Language :: Python :: 3.12
16
+ Classifier: Programming Language :: Python :: 3.13
17
+ Classifier: Programming Language :: Python :: 3.14
18
+ Classifier: Topic :: Documentation
19
+ Classifier: Topic :: Software Development
20
+ Classifier: Topic :: Utilities
21
+ Classifier: Typing :: Typed
22
+ Requires-Dist: mmar-llm>=1.0.6
23
+ Requires-Dist: pydantic>=2.12.4
24
+ Requires-Python: >=3.12
25
+ Description-Content-Type: text/markdown
26
+
27
+ # MMAR CARL - Collaborative Agent Reasoning Library
28
+
29
+ A Python library for building chain-of-thought reasoning systems with DAG-based parallel execution and mmar-llm integration.
30
+
31
+ ## Overview
32
+
33
+ CARL provides a structured framework for creating complex reasoning chains that can execute steps in parallel where dependencies allow. It's designed to work seamlessly with the `mmar-llm` library for production LLM integration and supports multi-language reasoning (Russian/English).
34
+
35
+ ## Key Features
36
+
37
+ - **DAG-based Execution**: Automatically parallelizes reasoning steps based on dependencies
38
+ - **Direct mmar-llm Integration**: Uses EntrypointsAccessor directly without unnecessary abstractions
39
+ - **Multi-language Support**: Built-in support for Russian and English languages
40
+ - **Clean Architecture**: Simple and straightforward usage pattern
41
+ - **Production Ready**: Async/sync compatibility, error handling, and retry logic
42
+ - **Parallel Processing**: Optimized execution with configurable worker pools
43
+ - **Financial Focus**: Designed for financial analysis with specialized prompts
44
+
45
+ ## Quick Start
46
+
47
+ ```python
48
+ import asyncio
49
+ from mmar_carl import (
50
+ ReasoningChain, StepDescription, ReasoningContext,
51
+ Language
52
+ )
53
+ from mmar_llm import EntrypointsAccessor, EntrypointsConfig
54
+
55
+ # Define a reasoning chain
56
+ EBITDA_ANALYSIS = [
57
+ StepDescription(
58
+ number=1,
59
+ title="Рост EBITDA и стабильность",
60
+ aim="Выяснить, стабильно ли растет EBITDA и насколько выросла",
61
+ reasoning_questions="Насколько изменилась (выросла/уменьшилась) EBITDA за рассматриваемый период?",
62
+ dependencies=[],
63
+ entities=["EBITDA"],
64
+ stage_action="Рассчитать темп прироста EBITDA за 12 последних месяцев год к году",
65
+ example_reasoning="1) если темп > 0%, то положительный сигнал; если темп < 0%, то отрицательный сигнал"
66
+ ),
67
+ StepDescription(
68
+ number=2,
69
+ title="Маржинальность EBITDA",
70
+ aim="Выяснить, сколько процентов выручки забирают операционные расходы",
71
+ reasoning_questions="Какая маржинальность по EBITDA за рассматриваемый период?",
72
+ dependencies=[1], # Depends on step 1
73
+ entities=["EBITDA_MARGIN", "EBITDA", "SALES_REVENUE"],
74
+ stage_action="EBITDA маржа = EBITDA / SALES_REVENUE",
75
+ example_reasoning="1) если маржа > 0%, то положительный сигнал: компания зарабатывает больше своих операционных расходов"
76
+ )
77
+ ]
78
+
79
+ # Create entrypoints accessor from configuration file
80
+ def create_entrypoints(entrypoints_path: str):
81
+ """Create EntrypointsAccessor from configuration file."""
82
+ import json
83
+ with open(entrypoints_path, encoding="utf-8") as f:
84
+ config_data = json.load(f)
85
+
86
+ entrypoints_config = EntrypointsConfig.model_validate(config_data)
87
+ return EntrypointsAccessor(entrypoints_config)
88
+
89
+ # Create and execute the reasoning chain
90
+ entrypoints = create_entrypoints("entrypoints.json")
91
+ chain = ReasoningChain(
92
+ steps=EBITDA_ANALYSIS,
93
+ max_workers=2,
94
+ enable_progress=True
95
+ )
96
+
97
+ context = ReasoningContext(
98
+ outer_context="Период,EBITDA,SALES_REVENUE\n2023-Q1,1000000,5000000\n2023-Q2,1200000,5500000",
99
+ entrypoints=entrypoints,
100
+ entrypoint_key="my_entrypoint",
101
+ language=Language.RUSSIAN,
102
+ retry_max=3
103
+ )
104
+
105
+ result = chain.execute(context)
106
+ print(result.get_final_output())
107
+ ```
108
+
109
+ ## Installation
110
+
111
+ ```bash
112
+ # For production use
113
+ pip install mmar-carl
114
+
115
+ # For development with mmar-llm integration
116
+ pip install mmar-carl "mmar-llm>=1.0.6"
117
+
118
+ # Development version with all dependencies
119
+ pip install mmar-carl[dev]
120
+ ```
121
+
122
+ ## Requirements
123
+
124
+ - Python 3.12+
125
+ - mmar-llm>=1.0.6 (for LLM integration)
126
+ - Pydantic for data models
127
+ - asyncio for parallel execution
128
+
129
+ ## Documentation
130
+
131
+ - **Quick Start**: [docs/quickstart.md](docs/quickstart.md) - Get up and running quickly
132
+ - **Examples**: [docs/examples.md](docs/examples.md) - Real-world usage examples
133
+ - **Advanced Usage**: [docs/advanced.md](docs/advanced.md) - Advanced features and optimization
134
+ - **Methodology**: [docs/methodology.md](docs/methodology.md) - Development methodology (in Russian)
135
+
136
+ ## Architecture
137
+
138
+ CARL is built around several key components:
139
+
140
+ - **StepDescription**: Defines individual reasoning steps with metadata and dependencies
141
+ - **ReasoningChain**: Orchestrates the execution of reasoning steps with DAG optimization
142
+ - **DAGExecutor**: Handles parallel execution based on dependencies with configurable workers
143
+ - **ReasoningContext**: Manages execution state, history, and multi-language support
144
+ - **EntrypointsAccessor**: Direct integration with mmar-llm EntrypointsAccessor
145
+ - **Language**: Built-in support for Russian and English languages
146
+ - **PromptTemplate**: Multi-language prompt templates for different analysis types
147
+
148
+ ## Key Concepts
149
+
150
+ ### DAG-Based Parallel Execution
151
+
152
+ CARL automatically analyzes step dependencies and creates execution batches for maximum parallelization:
153
+
154
+ ```python
155
+ # Steps 1 and 2 execute in parallel
156
+ StepDescription(number=1, title="Revenue Analysis", dependencies=[])
157
+ StepDescription(number=2, title="Cost Analysis", dependencies=[])
158
+ # Step 3 waits for both to complete
159
+ StepDescription(number=3, title="Profitability Analysis", dependencies=[1, 2])
160
+ ```
161
+
162
+ ### Multi-language Support
163
+
164
+ Built-in support for Russian and English with appropriate prompt templates:
165
+
166
+ ```python
167
+ # Russian language reasoning
168
+ context = ReasoningContext(
169
+ outer_context=data,
170
+ entrypoints=entrypoints,
171
+ entrypoint_key="my_entrypoint",
172
+ language=Language.RUSSIAN
173
+ )
174
+
175
+ # English language reasoning
176
+ context = ReasoningContext(
177
+ outer_context=data,
178
+ entrypoints=entrypoints,
179
+ entrypoint_key="my_entrypoint",
180
+ language=Language.ENGLISH
181
+ )
182
+ ```
183
+
184
+ ### Direct mmar-llm Integration
185
+
186
+ Simple and straightforward usage without unnecessary abstractions:
187
+
188
+ ```python
189
+ from mmar_llm import EntrypointsAccessor
190
+
191
+ # Direct usage pattern
192
+ context = ReasoningContext(
193
+ outer_context=data,
194
+ entrypoints=entrypoints,
195
+ entrypoint_key="my_entrypoint"
196
+ )
197
+ ```
198
+
199
+ ## Example Usage
200
+
201
+ See the [example.py](example.py) file for a complete end-to-end demonstration with:
202
+
203
+ - Direct mmar-llm EntrypointsAccessor integration
204
+ - Multi-language support (Russian/English)
205
+ - Parallel execution demonstration
206
+ - Error handling and retry logic
207
+ - Performance metrics
208
+
209
+ Run it with:
210
+
211
+ ```bash
212
+ # Set entrypoints configuration
213
+ export ENTRYPOINTS_PATH=/path/to/your/entrypoints.json
214
+
215
+ # Run the demonstration
216
+ python example.py entrypoints.json my_entrypoint_key
217
+
218
+ # Or run with environment variable
219
+ ENTRYPOINTS_PATH=entrypoints.json python example.py
220
+ ```
221
+
222
+ ## License
223
+
224
+ MIT License - see LICENSE file for details.
@@ -0,0 +1,9 @@
1
+ mmar_carl/__init__.py,sha256=UhK3QNaAIGO4WCfCBjgzANnmWgPI-eYdiwR_QvrxWo0,589
2
+ mmar_carl/chain.py,sha256=KVsfRVIWcf8djV7OabNpIx48jDq_LrIh8hzvKDmvVzc,10483
3
+ mmar_carl/executor.py,sha256=RzxGXAgv56Wwskqmrma8shItTEWkpY6CvuT-AWUx67Q,12069
4
+ mmar_carl/llm.py,sha256=82W8diXo1XL9RCUBYO5PeX5yzvfP3Zv4YHzJxfE3saA,1701
5
+ mmar_carl/models.py,sha256=cOtnWGnRk1ymPJGhfoDJbV8dJTISII6TrYZ0xul95fM,8596
6
+ mmar_carl-0.0.5.dist-info/licenses/LICENSE,sha256=2A90w8WjhOgQXnFuUijKJYazaqZ4_NTokYb9Po4y-9k,1061
7
+ mmar_carl-0.0.5.dist-info/WHEEL,sha256=eh7sammvW2TypMMMGKgsM83HyA_3qQ5Lgg3ynoecH3M,79
8
+ mmar_carl-0.0.5.dist-info/METADATA,sha256=Y3VdutXTMvfs8l6pRNDY2i93upXf9GbvWzz1GXOUez4,7942
9
+ mmar_carl-0.0.5.dist-info/RECORD,,
@@ -1,29 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: mmar-carl
3
- Version: 0.0.3
4
- Summary: Collaborative Agent Reasoning Library
5
- Keywords:
6
- Author: glazkov, shaposhnikov, tagin
7
- Author-email: glazkov <glazkov@airi.net>, shaposhnikov <shaposhnikov@airi.net>, tagin <tagin@airi.net>
8
- License-Expression: MIT
9
- License-File: LICENSE
10
- Classifier: Development Status :: 4 - Beta
11
- Classifier: Intended Audience :: Developers
12
- Classifier: Programming Language :: Python
13
- Classifier: Programming Language :: Python :: 3
14
- Classifier: Programming Language :: Python :: 3 :: Only
15
- Classifier: Programming Language :: Python :: 3.12
16
- Classifier: Programming Language :: Python :: 3.13
17
- Classifier: Programming Language :: Python :: 3.14
18
- Classifier: Topic :: Documentation
19
- Classifier: Topic :: Software Development
20
- Classifier: Topic :: Utilities
21
- Classifier: Typing :: Typed
22
- Requires-Dist: mmar-llm>=1.0.6
23
- Requires-Dist: pydantic>=2.12.4
24
- Requires-Python: >=3.12
25
- Description-Content-Type: text/markdown
26
-
27
- # mmar-carl
28
-
29
- ...
@@ -1,9 +0,0 @@
1
- mmar_carl/__init__.py,sha256=b6ucT35TUvjPgocZ4LE-OZyQ1NrZnRAxA9t7EbKYd1U,669
2
- mmar_carl/chain.py,sha256=KVsfRVIWcf8djV7OabNpIx48jDq_LrIh8hzvKDmvVzc,10483
3
- mmar_carl/executor.py,sha256=Mqm7nxT5kvV2vPd1obgG6kG6QC2bUt1Bmusn-nBDj34,11777
4
- mmar_carl/llm.py,sha256=82W8diXo1XL9RCUBYO5PeX5yzvfP3Zv4YHzJxfE3saA,1701
5
- mmar_carl/models.py,sha256=qV_V2z9DZ5lg3AINdzrujiBwcTH5kOU8Tqw_dBZWEXY,9476
6
- mmar_carl-0.0.3.dist-info/licenses/LICENSE,sha256=2A90w8WjhOgQXnFuUijKJYazaqZ4_NTokYb9Po4y-9k,1061
7
- mmar_carl-0.0.3.dist-info/WHEEL,sha256=eh7sammvW2TypMMMGKgsM83HyA_3qQ5Lgg3ynoecH3M,79
8
- mmar_carl-0.0.3.dist-info/METADATA,sha256=WsieZRYaUbbS54ezVXKzFhxQDsa6N6dVF77HJZnagQo,964
9
- mmar_carl-0.0.3.dist-info/RECORD,,