aloop 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. agent/__init__.py +0 -0
  2. agent/agent.py +182 -0
  3. agent/base.py +406 -0
  4. agent/context.py +126 -0
  5. agent/prompts/__init__.py +1 -0
  6. agent/todo.py +149 -0
  7. agent/tool_executor.py +54 -0
  8. agent/verification.py +135 -0
  9. aloop-0.1.1.dist-info/METADATA +252 -0
  10. aloop-0.1.1.dist-info/RECORD +66 -0
  11. aloop-0.1.1.dist-info/WHEEL +5 -0
  12. aloop-0.1.1.dist-info/entry_points.txt +2 -0
  13. aloop-0.1.1.dist-info/licenses/LICENSE +21 -0
  14. aloop-0.1.1.dist-info/top_level.txt +9 -0
  15. cli.py +19 -0
  16. config.py +146 -0
  17. interactive.py +865 -0
  18. llm/__init__.py +51 -0
  19. llm/base.py +26 -0
  20. llm/compat.py +226 -0
  21. llm/content_utils.py +309 -0
  22. llm/litellm_adapter.py +450 -0
  23. llm/message_types.py +245 -0
  24. llm/model_manager.py +265 -0
  25. llm/retry.py +95 -0
  26. main.py +246 -0
  27. memory/__init__.py +20 -0
  28. memory/compressor.py +554 -0
  29. memory/manager.py +538 -0
  30. memory/serialization.py +82 -0
  31. memory/short_term.py +88 -0
  32. memory/store/__init__.py +6 -0
  33. memory/store/memory_store.py +100 -0
  34. memory/store/yaml_file_memory_store.py +414 -0
  35. memory/token_tracker.py +203 -0
  36. memory/types.py +51 -0
  37. tools/__init__.py +6 -0
  38. tools/advanced_file_ops.py +557 -0
  39. tools/base.py +51 -0
  40. tools/calculator.py +50 -0
  41. tools/code_navigator.py +975 -0
  42. tools/explore.py +254 -0
  43. tools/file_ops.py +150 -0
  44. tools/git_tools.py +791 -0
  45. tools/notify.py +69 -0
  46. tools/parallel_execute.py +420 -0
  47. tools/session_manager.py +205 -0
  48. tools/shell.py +147 -0
  49. tools/shell_background.py +470 -0
  50. tools/smart_edit.py +491 -0
  51. tools/todo.py +130 -0
  52. tools/web_fetch.py +673 -0
  53. tools/web_search.py +61 -0
  54. utils/__init__.py +15 -0
  55. utils/logger.py +105 -0
  56. utils/model_pricing.py +49 -0
  57. utils/runtime.py +75 -0
  58. utils/terminal_ui.py +422 -0
  59. utils/tui/__init__.py +39 -0
  60. utils/tui/command_registry.py +49 -0
  61. utils/tui/components.py +306 -0
  62. utils/tui/input_handler.py +393 -0
  63. utils/tui/model_ui.py +204 -0
  64. utils/tui/progress.py +292 -0
  65. utils/tui/status_bar.py +178 -0
  66. utils/tui/theme.py +165 -0
llm/model_manager.py ADDED
@@ -0,0 +1,265 @@
1
+ """Model manager for handling multiple models with YAML persistence."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import os
6
+ import tempfile
7
+ from contextlib import suppress
8
+ from dataclasses import dataclass, field
9
+ from typing import Any
10
+ from urllib.parse import urlparse
11
+
12
+ from utils import get_logger
13
+
14
+ logger = get_logger(__name__)
15
+
16
# Starter config written by ModelManager._create_default_config the first time
# no models.yaml exists. Everything inside the triple-quoted string is user
# documentation/examples inside the generated YAML file, not executable code.
DEFAULT_CONFIG_TEMPLATE = """# Model Configuration
# This file is gitignored - do not commit to version control
#
# The key under `models` is the LiteLLM model ID (provider/model).
# Fill in `api_key` directly in this file.
#
# Supported fields:
#   - api_key: API key (required for most hosted providers)
#   - api_base: Custom base URL (optional)
#   - timeout: Request timeout in seconds (default: 600)
#   - drop_params: Drop unsupported params (default: true)

models:
  # openai/gpt-4o:
  #   api_key: sk-...
  #   timeout: 300
  # anthropic/claude-3-5-sonnet-20241022:
  #   api_key: sk-ant-...
  # ollama/llama2:
  #   api_base: http://localhost:11434
default: null
"""
38
+
39
+
40
+ def _coerce_int(value: Any, default: int) -> int:
41
+ if value is None:
42
+ return default
43
+ if isinstance(value, bool):
44
+ return default
45
+ if isinstance(value, int):
46
+ return value
47
+ try:
48
+ return int(str(value).strip())
49
+ except (ValueError, TypeError):
50
+ return default
51
+
52
+
53
+ def _coerce_bool(value: Any, default: bool) -> bool:
54
+ if value is None:
55
+ return default
56
+ if isinstance(value, bool):
57
+ return value
58
+ if isinstance(value, str):
59
+ v = value.strip().lower()
60
+ if v in {"true", "1", "yes", "y", "on"}:
61
+ return True
62
+ if v in {"false", "0", "no", "n", "off"}:
63
+ return False
64
+ return default
65
+
66
+
67
+ def _is_local_api_base(api_base: str | None) -> bool:
68
+ if not api_base:
69
+ return False
70
+ raw = str(api_base).strip()
71
+ if not raw:
72
+ return False
73
+ if "://" not in raw:
74
+ raw = f"http://{raw}"
75
+ parsed = urlparse(raw)
76
+ host = (parsed.hostname or "").lower()
77
+ return host in {"localhost", "127.0.0.1", "::1"}
78
+
79
+
80
@dataclass
class ModelProfile:
    """Configuration for a single model.

    Mirrors one entry under ``models`` in models.yaml; unknown YAML keys are
    carried in ``extra`` so they survive a load/save round trip.
    """

    model_id: str  # LiteLLM model ID (e.g. "openai/gpt-4o")
    api_key: str | None = None
    api_base: str | None = None
    timeout: int = 600
    drop_params: bool = True
    extra: dict[str, Any] = field(default_factory=dict)

    @property
    def provider(self) -> str:
        # The provider is the segment before the first "/"; IDs without a
        # slash have no recognisable provider.
        head, sep, _ = self.model_id.partition("/")
        return head if sep else "unknown"

    @property
    def display_name(self) -> str:
        # Shown in UIs; currently just the raw model ID.
        return self.model_id

    def to_dict(self) -> dict[str, Any]:
        """Serialize to the YAML mapping shape, omitting unset optionals."""
        payload: dict[str, Any] = {"timeout": self.timeout, "drop_params": self.drop_params}
        if self.api_key:
            payload["api_key"] = self.api_key
        if self.api_base is not None:
            payload["api_base"] = self.api_base
        payload.update(self.extra)
        return payload
108
+
109
+
110
class ModelManager:
    """Manages multiple models with YAML persistence.

    Profiles live in a single YAML file whose top-level keys are ``models``
    (mapping of LiteLLM model ID -> settings) and ``default`` (the model ID
    selected when none is chosen explicitly). ``current_model_id`` is the
    session-local selection; only ``default`` is persisted.
    """

    # Default on-disk location; overridable per instance via __init__.
    CONFIG_PATH = os.path.join(os.path.expanduser("~"), ".aloop", "models.yaml")

    def __init__(self, config_path: str | None = None):
        # config_path: optional override of CONFIG_PATH (e.g. for tests).
        self.config_path = config_path or self.CONFIG_PATH
        self.models: dict[str, ModelProfile] = {}
        self.default_model_id: str | None = None
        self.current_model_id: str | None = None
        self._load()

    def _ensure_yaml(self) -> None:
        """Raise an actionable RuntimeError if PyYAML is not installed."""
        try:
            import yaml  # noqa: F401
        except ImportError as e:
            raise RuntimeError(
                "PyYAML is required for model configuration. Install it (e.g. `uv add pyyaml`)."
            ) from e

    def _atomic_write(self, content: str) -> None:
        """Atomically replace the config file with *content*.

        Writes to a same-directory temp file then ``os.replace``s it so a
        crash mid-write can never leave a truncated config behind.
        """
        directory = os.path.dirname(self.config_path) or "."
        os.makedirs(directory, exist_ok=True)

        # Temp file must be in the same directory for os.replace to be an
        # atomic rename (no cross-filesystem copy).
        fd, tmp_path = tempfile.mkstemp(prefix=".models.", suffix=".tmp", dir=directory)
        try:
            with os.fdopen(fd, "w", encoding="utf-8") as f:
                f.write(content)
            os.replace(tmp_path, self.config_path)
            # Owner-only permissions: the file may contain API keys.
            with suppress(OSError):
                os.chmod(self.config_path, 0o600)
        finally:
            # After a successful replace tmp_path no longer exists, so the
            # unlink raising is expected; on failure this cleans up.
            with suppress(OSError):
                os.unlink(tmp_path)

    def _create_default_config(self) -> None:
        """Write the commented starter template to config_path."""
        self._atomic_write(DEFAULT_CONFIG_TEMPLATE)
        logger.info(f"Created model config template at {self.config_path}")

    def _load(self) -> None:
        """Populate models/default/current from the YAML file.

        Missing file -> a template is created first. Malformed entries are
        skipped with a warning rather than aborting startup.
        """
        self._ensure_yaml()
        import yaml

        if not os.path.exists(self.config_path):
            self._create_default_config()

        with open(self.config_path, encoding="utf-8") as f:
            config = yaml.safe_load(f) or {}

        models = config.get("models") or {}
        if not isinstance(models, dict):
            logger.warning("Invalid models.yaml format: 'models' should be a mapping")
            models = {}

        for model_id, data in models.items():
            if not isinstance(model_id, str) or not model_id.strip():
                continue
            if not isinstance(data, dict):
                logger.warning(f"Invalid model config for '{model_id}', skipping")
                continue

            api_key = data.get("api_key")
            api_base = data.get("api_base")
            timeout = _coerce_int(data.get("timeout"), default=600)
            drop_params = _coerce_bool(data.get("drop_params"), default=True)
            # Keep unrecognised keys so _save round-trips user additions.
            extra = {
                k: v
                for k, v in data.items()
                if k not in {"name", "api_key", "api_base", "timeout", "drop_params"}
            }

            self.models[model_id] = ModelProfile(
                model_id=model_id,
                api_key=None if api_key is None else str(api_key),
                api_base=None if api_base is None else str(api_base),
                timeout=timeout,
                drop_params=drop_params,
                extra=extra,
            )

        default = config.get("default")
        self.default_model_id = default if isinstance(default, str) else None
        # Fall back to the first configured model when `default` is missing
        # or names an unknown model.
        if self.default_model_id not in self.models:
            self.default_model_id = next(iter(self.models.keys()), None)

        self.current_model_id = self.default_model_id
        logger.info(f"Loaded {len(self.models)} models from {self.config_path}")

    def _save(self) -> None:
        """Serialize models + default back to YAML via an atomic write."""
        self._ensure_yaml()
        import yaml

        config = {
            "models": {mid: profile.to_dict() for mid, profile in self.models.items()},
            "default": self.default_model_id,
        }
        header = "# Model Configuration\n# This file is gitignored - do not commit to version control\n\n"
        body = yaml.safe_dump(config, sort_keys=False, allow_unicode=True)
        self._atomic_write(header + body)

    def is_configured(self) -> bool:
        """True when at least one model exists and a default is set."""
        return bool(self.models) and bool(self.default_model_id)

    def get_model(self, model_id: str) -> ModelProfile | None:
        """Return the profile for *model_id*, or None if unknown."""
        return self.models.get(model_id)

    def list_models(self) -> list[ModelProfile]:
        """Return all profiles, in config-file insertion order."""
        return list(self.models.values())

    def get_model_ids(self) -> list[str]:
        """Return all configured model IDs."""
        return list(self.models.keys())

    def get_default_model_id(self) -> str | None:
        """Return the persisted default model ID, if any."""
        return self.default_model_id

    def get_current_model(self) -> ModelProfile | None:
        """Return the currently selected profile, or None if unset."""
        if not self.current_model_id:
            return None
        return self.models.get(self.current_model_id)

    def set_default(self, model_id: str) -> bool:
        """Persist *model_id* as default; returns False for unknown IDs."""
        if model_id not in self.models:
            return False
        self.default_model_id = model_id
        # Only adopt the new default as current when nothing is selected yet;
        # an explicit session selection wins.
        if not self.current_model_id:
            self.current_model_id = model_id
        self._save()
        return True

    def switch_model(self, model_id: str) -> ModelProfile | None:
        """Select *model_id* for this session (not persisted); None if unknown."""
        if model_id not in self.models:
            return None
        self.current_model_id = model_id
        return self.get_current_model()

    def validate_model(self, model: ModelProfile) -> tuple[bool, str]:
        """Validate a model has required configuration.

        Returns:
            ``(True, "")`` when usable, otherwise ``(False, message)`` with
            an actionable error string.
        """
        if not model.model_id:
            return False, "Model ID is missing."
        # Local providers (ollama, or anything on a loopback api_base) are
        # the only ones allowed to run without an API key.
        if (
            model.provider not in {"ollama", "localhost"}
            and not _is_local_api_base(model.api_base)
            and not (model.api_key or "").strip()
        ):
            return (
                False,
                f"API key not configured for {model.provider}. "
                f"Edit `{self.config_path}` and set models['{model.model_id}'].api_key.",
            )
        return True, ""

    def reload(self) -> None:
        """Discard in-memory state and re-read the config file."""
        self.models.clear()
        self.default_model_id = None
        self.current_model_id = None
        self._load()
llm/retry.py ADDED
@@ -0,0 +1,95 @@
1
+ """Retry utilities for LLM API calls using tenacity."""
2
+
3
+ import asyncio
4
+ from typing import Callable, TypeVar
5
+
6
+ from tenacity import retry, retry_if_exception, stop_after_attempt
7
+ from tenacity.wait import wait_base
8
+
9
+ from config import Config
10
+ from utils import get_logger
11
+
12
+ logger = get_logger(__name__)
13
+ T = TypeVar("T")
14
+
15
+
16
def is_rate_limit_error(error: BaseException) -> bool:
    """Heuristically detect provider rate-limit / quota errors.

    Matches on substrings of the error message because LiteLLM surfaces many
    provider-specific exception types with similar wording.
    """
    markers = ("429", "rate limit", "quota", "too many requests", "resourceexhausted")
    message = str(error).lower()
    return any(marker in message for marker in markers)
27
+
28
+
29
def is_retryable_error(error: BaseException) -> bool:
    """Check if an error is retryable.

    Cancellation always propagates; rate limits, known transient exception
    types, and connection/timeout/5xx-sounding messages are retried.
    """
    # Explicit cancellation must never be swallowed by the retry loop.
    if isinstance(error, asyncio.CancelledError):
        return False

    if is_rate_limit_error(error):
        return True

    type_name = type(error).__name__
    if "RateLimitError" in type_name or "APIConnectionError" in type_name:
        return True

    transient_markers = (
        "timeout",
        "connection",
        "server error",
        "500",
        "502",
        "503",
        "504",
    )
    message = str(error).lower()
    return any(marker in message for marker in transient_markers)
53
+
54
+
55
class _ConfigBackoff(wait_base):
    """Tenacity wait strategy that delegates delay computation to Config.

    Lets the backoff schedule live in one place (Config.get_retry_delay)
    instead of being duplicated in tenacity wait arguments.
    """

    def __call__(self, retry_state) -> float:
        # attempt_number is 1-based; Config.get_retry_delay takes the
        # zero-based index of the retry being scheduled.
        attempt = max(retry_state.attempt_number - 1, 0)
        return Config.get_retry_delay(attempt)
59
+
60
+
61
def _log_before_sleep(retry_state) -> None:
    """Tenacity ``before_sleep`` hook: log the failure and the upcoming delay.

    No-op when the attempt did not end in an exception.
    """
    error = retry_state.outcome.exception() if retry_state.outcome else None
    if not error:
        return
    error_type = "Rate limit" if is_rate_limit_error(error) else "Retryable"
    # Recompute the delay the wait strategy will apply, purely for logging.
    delay = _ConfigBackoff()(retry_state)
    # Lazy %-style args (consistent with the call below) so formatting is
    # skipped entirely when WARNING is disabled.
    logger.warning("%s error: %s", error_type, error)
    logger.warning(
        "Retrying in %.1fs... (attempt %s/%s)",
        delay,
        retry_state.attempt_number,
        Config.RETRY_MAX_ATTEMPTS + 1,
    )
74
+
75
+
76
def with_retry():
    """Decorator to add async retry logic with exponential backoff.

    The total number of attempts is RETRY_MAX_ATTEMPTS + 1:
    - 1 initial attempt
    - RETRY_MAX_ATTEMPTS retry attempts (if initial fails)
    """

    def decorator(func: Callable[..., T]) -> Callable[..., T]:
        # tenacity's stop_after_attempt counts *total* attempts, not retries,
        # so N retries require N + 1 attempts.
        retrying = retry(
            retry=retry_if_exception(is_retryable_error),
            stop=stop_after_attempt(Config.RETRY_MAX_ATTEMPTS + 1),
            wait=_ConfigBackoff(),
            reraise=True,
            before_sleep=_log_before_sleep,
        )
        return retrying(func)

    return decorator
main.py ADDED
@@ -0,0 +1,246 @@
1
+ """Main entry point for the agentic loop system."""
2
+
3
+ import argparse
4
+ import asyncio
5
+ import importlib.metadata
6
+ import warnings
7
+
8
+ from rich.console import Console
9
+
10
+ from agent.agent import LoopAgent
11
+ from config import Config
12
+ from interactive import run_interactive_mode, run_model_setup_mode
13
+ from llm import LiteLLMAdapter, ModelManager
14
+ from memory import MemoryManager
15
+ from tools.advanced_file_ops import EditTool, GlobTool, GrepTool
16
+ from tools.calculator import CalculatorTool
17
+ from tools.code_navigator import CodeNavigatorTool
18
+ from tools.explore import ExploreTool
19
+ from tools.file_ops import FileReadTool, FileSearchTool, FileWriteTool
20
+ from tools.notify import NotifyTool
21
+ from tools.parallel_execute import ParallelExecutionTool
22
+ from tools.shell import ShellTool
23
+ from tools.shell_background import BackgroundTaskManager, ShellTaskStatusTool
24
+ from tools.smart_edit import SmartEditTool
25
+ from tools.web_fetch import WebFetchTool
26
+ from tools.web_search import WebSearchTool
27
+ from utils import setup_logger, terminal_ui
28
+ from utils.runtime import ensure_runtime_dirs
29
+
30
+ warnings.filterwarnings("ignore", message="Pydantic serializer warnings.*", category=UserWarning)
31
+
32
+
33
def create_agent(model_id: str | None = None):
    """Factory function to create agents with tools.

    Args:
        model_id: Optional LiteLLM model ID to use (defaults to current/default)

    Returns:
        Configured LoopAgent instance with all tools

    Raises:
        ValueError: If no models are configured, no model is available, or the
            selected model fails validation (e.g. missing API key).
    """
    # Initialize background task manager (shared between shell tools)
    task_manager = BackgroundTaskManager.get_instance()

    # Initialize base tools
    tools = [
        FileReadTool(),
        FileWriteTool(),
        FileSearchTool(),
        CalculatorTool(),
        WebSearchTool(),
        WebFetchTool(),
        GlobTool(),
        GrepTool(),
        EditTool(),
        SmartEditTool(),
        CodeNavigatorTool(),
        ShellTool(task_manager=task_manager),
        ShellTaskStatusTool(task_manager=task_manager),
        NotifyTool(),
    ]

    # Initialize model manager (reads .aloop/models.yaml)
    model_manager = ModelManager()

    if not model_manager.is_configured():
        raise ValueError(
            "No models configured. Run `aloop` without --task and use /model edit, "
            "or edit `.aloop/models.yaml` to add at least one model and set `default`."
        )

    # Get the model to use
    if model_id:
        profile = model_manager.get_model(model_id)
        if profile:
            model_manager.switch_model(model_id)
        else:
            # Unknown ID is not fatal: warn and fall back to the default model.
            available = ", ".join(model_manager.get_model_ids())
            terminal_ui.print_error(f"Model '{model_id}' not found, using default")
            if available:
                terminal_ui.console.print(f"Available: {available}")

    current_profile = model_manager.get_current_model()
    if not current_profile:
        raise ValueError("No model available. Please check `.aloop/models.yaml`.")

    # Fail fast (e.g. missing API key) before constructing the adapter.
    is_valid, error_msg = model_manager.validate_model(current_profile)
    if not is_valid:
        raise ValueError(error_msg)

    # Create LLM instance with the current profile
    llm = LiteLLMAdapter(
        model=current_profile.model_id,
        api_key=current_profile.api_key,
        api_base=current_profile.api_base,
        drop_params=current_profile.drop_params,
        timeout=current_profile.timeout,
    )

    agent = LoopAgent(
        llm=llm,
        tools=tools,
        max_iterations=Config.MAX_ITERATIONS,
        model_manager=model_manager,
    )

    # Add tools that require agent reference (cannot be built before `agent`)
    agent.tool_executor.add_tool(ExploreTool(agent))
    agent.tool_executor.add_tool(ParallelExecutionTool(agent))

    return agent
112
+
113
+
114
async def _resolve_session_id(resume_arg: str) -> str:
    """Resolve --resume argument to a full session ID.

    Args:
        resume_arg: "latest" or a session ID / prefix

    Returns:
        Full session ID

    Raises:
        ValueError: If session cannot be found
    """
    if resume_arg == "latest":
        latest = await MemoryManager.find_latest_session()
        if latest:
            return latest
        raise ValueError("No sessions found to resume.")

    # Treat anything else as a (possibly partial) session ID prefix.
    matched = await MemoryManager.find_session_by_prefix(resume_arg)
    if matched:
        return matched
    raise ValueError(f"Session '{resume_arg}' not found.")
136
+
137
+
138
def main():
    """Main CLI entry point.

    Parses arguments, resolves optional session resumption, builds the agent
    (running interactive model setup first when models aren't configured),
    then either starts interactive mode or executes a single --task and
    prints the raw result.
    """
    parser = argparse.ArgumentParser(description="Run an AI agent with tool-calling capabilities")

    # Report the installed package version; "dev" when running from a checkout
    # where the distribution metadata is absent.
    try:
        version = importlib.metadata.version("aloop")
    except importlib.metadata.PackageNotFoundError:
        version = "dev"
    parser.add_argument("--version", "-V", action="version", version=f"aloop {version}")

    parser.add_argument(
        "--task",
        "-t",
        type=str,
        help="Task for the agent to complete (if not provided, enters interactive mode)",
    )
    parser.add_argument(
        "--verbose",
        "-v",
        action="store_true",
        help="Enable verbose logging to .aloop/logs/",
    )
    parser.add_argument(
        "--model",
        "-m",
        type=str,
        help="Model to use (LiteLLM model ID, e.g. openai/gpt-4o)",
    )
    # nargs="?" + const: a bare `--resume` means "latest"; a value is treated
    # as a session ID prefix.
    parser.add_argument(
        "--resume",
        "-r",
        nargs="?",
        const="latest",
        help="Resume a previous session (session ID prefix or 'latest')",
    )

    args = parser.parse_args()

    # Initialize runtime directories (create logs dir only in verbose mode)
    ensure_runtime_dirs(create_logs=args.verbose)

    # Initialize logging only in verbose mode
    if args.verbose:
        setup_logger()

    # Validate config
    try:
        Config.validate()
    except ValueError as e:
        terminal_ui.print_error(str(e), title="Configuration Error")
        return

    # Resolve --resume session ID early (before agent creation) so we can fail fast
    resume_session_id = None
    if args.resume:
        try:
            resume_session_id = asyncio.run(_resolve_session_id(args.resume))
            terminal_ui.print_info(f"Resuming session: {resume_session_id}")
        except ValueError as e:
            terminal_ui.print_error(str(e), title="Resume Error")
            return

    # Create agent with optional model selection. If we're going into interactive mode and
    # models aren't configured yet, enter a setup session first.
    try:
        agent = create_agent(model_id=args.model)
    except ValueError as e:
        if args.task:
            # Non-interactive run: nothing to do but explain and exit.
            terminal_ui.print_error(str(e), title="Model Configuration Error")
            terminal_ui.console.print(
                "Edit `.aloop/models.yaml` to add models and set `default` (this file is gitignored). "
                "Tip: run `aloop` (interactive) and use /model edit."
            )
            return

        terminal_ui.print_error(str(e), title="Model Setup Required")
        ready = asyncio.run(run_model_setup_mode())
        if not ready:
            return

        # Retry after setup.
        agent = create_agent(model_id=args.model)

    async def _run() -> None:
        # Load resumed session if requested
        if resume_session_id:
            await agent.load_session(resume_session_id)

        # If no task provided, enter interactive mode (default behavior)
        if not args.task:
            await run_interactive_mode(agent)
            return

        # Single-turn mode: execute one task and exit
        task = args.task

        # Quiet mode: suppress all Rich UI output, print raw result only
        terminal_ui.console = Console(quiet=True)

        # Run agent
        result = await agent.run(task)

        print(result)

    asyncio.run(_run())
243
+
244
+
245
# Allow running this module directly (`python main.py`) in addition to the
# packaged console-script entry point.
if __name__ == "__main__":
    main()
memory/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ """Memory management system for aloop framework.
2
+
3
+ This module provides intelligent memory management with automatic compression,
4
+ token tracking, cost optimization, and YAML-based persistence.
5
+ """
6
+
7
+ from .compressor import WorkingMemoryCompressor
8
+ from .manager import MemoryManager
9
+ from .short_term import ShortTermMemory
10
+ from .token_tracker import TokenTracker
11
+ from .types import CompressedMemory, CompressionStrategy
12
+
13
# Public API of the memory package, sorted alphabetically for consistency
# (the export order has no runtime effect).
__all__ = [
    "CompressedMemory",
    "CompressionStrategy",
    "MemoryManager",
    "ShortTermMemory",
    "TokenTracker",
    "WorkingMemoryCompressor",
]