ollaagent 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ollaAgent/__init__.py ADDED
File without changes
ollaAgent/agent.py ADDED
@@ -0,0 +1,695 @@
1
import glob as glob_module
import json
import os
import re
import subprocess
import sys
from datetime import datetime
from functools import partial
from pathlib import Path
from typing import Any, NamedTuple

from dotenv import load_dotenv
from ollama import Client
from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown
from rich.panel import Panel

from ollaAgent.config_loader import build_system_prompt, load_config
from ollaAgent.memory import SESSION_DIR, SessionMemory, save_session
from ollaAgent.permissions import PermissionConfig
from ollaAgent.plan_mode import run_plan
from ollaAgent.subagent import SubagentTask, run_subagents
from ollaAgent.tool_bash import tool_bash
23
+
24
# Load .env so the CF Access credentials are available via os.getenv below.
load_dotenv()

# Shared Rich console used for all agent output.
console = Console()

# Default cap on model round-trips per agentic-loop invocation.
MAX_ITERATIONS = 10
# grep skips files larger than this to avoid reading huge blobs.
MAX_FILE_SIZE = 1 * 1024 * 1024  # 1MB
# File tools are sandboxed to the working directory captured at import time.
SAFE_BASE = Path.cwd()
TOKEN_THRESHOLD = 80_000  # trim triggers above 80k (qwen3-coder-next 128k context)

# Substrings that make run_python reject the submitted code outright.
BLOCKED_KEYWORDS = [
    "shutil.rmtree",
    "os.remove",
    "os.unlink",
    "sys.exit",
    "__import__",
    "eval(",
    "exec(",
]
42
+
43
# JSON-schema tool definitions advertised to the model on every chat call.
# Every "name" here must have a matching handler in the dispatch table
# built by build_dispatch (static tools live in _STATIC_DISPATCH).
TOOLS = [
    {
        "type": "function",
        "function": {
            "name": "run_python",
            "description": "Execute Python code and return the output",
            "parameters": {
                "type": "object",
                "properties": {
                    "code": {"type": "string", "description": "Python code to execute"}
                },
                "required": ["code"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "write_file",
            "description": "Create a new file or overwrite an existing file with given content",
            "parameters": {
                "type": "object",
                "properties": {
                    "path": {"type": "string", "description": "File path to write"},
                    "content": {"type": "string", "description": "Content to write"},
                },
                "required": ["path", "content"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "edit_file",
            "description": "Overwrite an existing file entirely with new content",
            "parameters": {
                "type": "object",
                "properties": {
                    "path": {"type": "string", "description": "File path to edit"},
                    "content": {
                        "type": "string",
                        "description": "New content to write",
                    },
                },
                "required": ["path", "content"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "glob",
            "description": "Find files matching a glob pattern",
            "parameters": {
                "type": "object",
                "properties": {
                    "pattern": {
                        "type": "string",
                        "description": "Glob pattern (e.g. **/*.py)",
                    },
                    "base_path": {
                        "type": "string",
                        "description": "Base directory to search (default: cwd)",
                    },
                },
                "required": ["pattern"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "grep",
            "description": "Search file contents using a regex pattern",
            "parameters": {
                "type": "object",
                "properties": {
                    "pattern": {
                        "type": "string",
                        "description": "Regex pattern to search",
                    },
                    "path": {
                        "type": "string",
                        "description": "File or directory to search",
                    },
                    "recursive": {
                        "type": "boolean",
                        "description": "Search recursively (default: true)",
                    },
                },
                "required": ["pattern", "path"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "bash",
            "description": "Execute a bash command and return the output. Dangerous commands will be blocked or require user confirmation.",
            "parameters": {
                "type": "object",
                "properties": {
                    "command": {
                        "type": "string",
                        "description": "Bash command to execute",
                    },
                },
                "required": ["command"],
            },
        },
    },
]
155
+
156
+
157
+ # ──────────────────────────────────────────
158
+ # Tool Helpers
159
+ # ──────────────────────────────────────────
160
+
161
+
162
def _is_safe_path(path: str) -> bool:
    """Return True when *path* resolves to a location under SAFE_BASE."""
    try:
        candidate = Path(path).resolve()
        return candidate.is_relative_to(SAFE_BASE)
    except Exception:
        # Unresolvable / malformed paths are treated as unsafe.
        return False
168
+
169
+
170
def _tool_run_python(args: dict[str, Any]) -> str:
    """Execute Python code in a subprocess and return its output.

    The code is rejected outright if it contains any BLOCKED_KEYWORDS
    substring, and the subprocess is killed after a 10-second timeout.
    """
    code = args.get("code", "")
    for keyword in BLOCKED_KEYWORDS:
        if keyword in code:
            return f"ERROR: Blocked - dangerous operation detected: '{keyword}'"
    try:
        # BUGFIX: use the same interpreter that runs the agent; a bare
        # "python" may be absent from PATH or point at another version.
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True,
            timeout=10,
        )
        # BUGFIX: keep both streams — stderr often carries the traceback
        # even when stdout is non-empty, and it used to be dropped.
        output = "\n".join(
            stream for stream in (result.stdout.strip(), result.stderr.strip()) if stream
        )
        return output if output else "(no output)"
    except subprocess.TimeoutExpired:
        return "ERROR: Timeout (10s)"
    except Exception as exc:
        return f"ERROR: {exc}"
189
+
190
+
191
def _tool_write_file(args: dict[str, Any]) -> str:
    """Create a file (parents included) or overwrite it with the given content.

    Writes are restricted to paths under the working directory.
    """
    path = args.get("path", "")
    content = args.get("content", "")
    if not _is_safe_path(path):
        return f"ERROR: Blocked - path '{path}' is outside working directory"
    try:
        destination = Path(path)
        destination.parent.mkdir(parents=True, exist_ok=True)
        destination.write_text(content, encoding="utf-8")
    except Exception as exc:
        return f"ERROR: {exc}"
    return f"OK: Written {len(content)} chars to '{path}'"
205
+
206
+
207
def _tool_edit_file(args: dict[str, Any]) -> str:
    """Overwrite an existing file entirely; refuses to create a new one."""
    path = args.get("path", "")
    if Path(path).exists():
        # Delegate the actual write (and path-safety check) to write_file.
        return _tool_write_file(args)
    return (
        f"ERROR: File '{path}' does not exist. Use write_file to create a new file."
    )
215
+
216
+
217
def _tool_glob(args: dict[str, Any]) -> str:
    """List files matching a glob pattern, one path per line (sorted)."""
    pattern = args.get("pattern", "")
    base = args.get("base_path", str(SAFE_BASE))
    if not _is_safe_path(base):
        return f"ERROR: Blocked - path '{base}' is outside working directory"
    try:
        found = sorted(Path(base).glob(pattern))
    except Exception as exc:
        return f"ERROR: {exc}"
    if not found:
        return "(no matches)"
    return "\n".join(str(match) for match in found)
228
+
229
+
230
def _grep_file(file: Path, regex: re.Pattern) -> list[str]:
    """Return '<file>:<lineno>: <line>' entries for every regex match in *file*.

    Oversized files and unreadable ones (binary content, vanished between the
    directory walk and the read) yield no results instead of raising.
    """
    try:
        # BUGFIX: stat() moved inside the try — the file can disappear
        # between the caller's directory walk and this call, and the bare
        # stat() used to raise an uncaught OSError.
        if file.stat().st_size > MAX_FILE_SIZE:
            return []
        lines = file.read_text(encoding="utf-8").splitlines()
    except (UnicodeDecodeError, OSError):
        return []
    return [
        f"{file}:{i}: {line}"
        for i, line in enumerate(lines, 1)
        if regex.search(line)
    ]
243
+
244
+
245
def _tool_grep(args: dict[str, Any]) -> str:
    """Search file contents with a regex; returns 'file:line: content' rows."""
    pattern = args.get("pattern", "")
    path = args.get("path", str(SAFE_BASE))
    recursive = args.get("recursive", True)
    if not _is_safe_path(path):
        return f"ERROR: Blocked - path '{path}' is outside working directory"
    try:
        regex = re.compile(pattern)
    except re.error as exc:
        return f"ERROR: Invalid regex - {exc}"
    target = Path(path)
    # BUGFIX: rglob/glob on a *file* path yields nothing, so grepping a
    # single file always reported "(no matches)". Search the file directly.
    if target.is_file():
        files = iter([target])
    else:
        files = target.rglob("*") if recursive else target.glob("*")
    results: list[str] = []
    for file in files:
        if file.is_file():
            results.extend(_grep_file(file, regex))
    return "\n".join(results) if results else "(no matches)"
263
+
264
+
265
+ # ──────────────────────────────────────────
266
+ # Tool Dispatcher
267
+ # ──────────────────────────────────────────
268
+
269
# Tool-name → handler mapping for tools that need no runtime configuration.
# "bash" is added per-session by build_dispatch (it needs a PermissionConfig).
_STATIC_DISPATCH: dict[str, Any] = {
    "run_python": _tool_run_python,
    "write_file": _tool_write_file,
    "edit_file": _tool_edit_file,
    "glob": _tool_glob,
    "grep": _tool_grep,
}
276
+
277
+
278
def build_dispatch(perm_config: PermissionConfig) -> dict[str, Any]:
    """Return the full tool dispatch table with permissions injected into bash."""
    dispatch = dict(_STATIC_DISPATCH)
    dispatch["bash"] = partial(tool_bash, config=perm_config)
    return dispatch
284
+
285
+
286
def execute_tool(name: str, args: dict[str, Any], dispatch: dict[str, Any]) -> str:
    """Run the tool registered under *name* with *args*; error string if unknown."""
    handler = dispatch.get(name)
    return handler(args) if handler is not None else f"ERROR: Unknown tool '{name}'"
292
+
293
+
294
+ # ──────────────────────────────────────────
295
+ # Message Management
296
+ # ──────────────────────────────────────────
297
+
298
+
299
def trim_messages(messages: list[dict], max_turns: int = 20) -> list[dict]:
    """Keep every system message plus only the newest *max_turns* other messages."""
    system_msgs: list[dict] = []
    other_msgs: list[dict] = []
    for msg in messages:
        (system_msgs if msg["role"] == "system" else other_msgs).append(msg)
    return system_msgs + other_msgs[-max_turns:]
304
+
305
+
306
def trim_by_tokens(
    messages: list[dict],
    token_count: int | None,
    threshold: int = TOKEN_THRESHOLD,
) -> list[dict]:
    """Drop old messages once *token_count* exceeds *threshold*.

    Removal priority:
      1. the oldest tool message (grep/bash results tend to be large)
      2. the oldest assistant message
      3. last resort: trim_messages(max_turns=10) sliding-window fallback
    System messages are always preserved, and the input list is never
    mutated — a (possibly identical) list is returned.

    BUGFIX: the annotation said ``int`` although the body explicitly handles
    ``None`` (the model/server may not report a token count) — now ``int | None``.
    """
    if token_count is None or token_count <= threshold:
        return messages

    result = list(messages)

    # Priority 1: remove the oldest tool message.
    for i, msg in enumerate(result):
        if msg["role"] == "tool":
            result.pop(i)
            console.print(
                f"[dim][Token] {token_count:,} > {threshold:,} — tool 메시지 trim[/]"
            )
            return result

    # Priority 2: remove the oldest assistant message.
    for i, msg in enumerate(result):
        if msg["role"] == "assistant":
            result.pop(i)
            console.print(
                f"[dim][Token] {token_count:,} > {threshold:,} — assistant 메시지 trim[/]"
            )
            return result

    # Last resort: sliding-window fallback.
    console.print(
        f"[dim][Token] {token_count:,} > {threshold:,} — 슬라이딩 윈도우 폴백[/]"
    )
    return trim_messages(result, max_turns=10)
347
+
348
+
349
+ # ──────────────────────────────────────────
350
+ # Stream Helpers
351
+ # ──────────────────────────────────────────
352
+
353
+
354
+ def _accumulate_tool_calls(msg: dict[str, Any], accumulated: dict[int, dict]) -> None:
355
+ """스트림 chunk에서 tool_calls를 누적한다 (분산 전송 대응)."""
356
+ for tc in msg.get("tool_calls", []):
357
+ idx = tc.get("index", len(accumulated))
358
+ if idx not in accumulated:
359
+ accumulated[idx] = {"name": "", "arguments": ""}
360
+ fn = tc.get("function", {})
361
+ accumulated[idx]["name"] += fn.get("name", "")
362
+ raw_args = fn.get("arguments", "")
363
+ if isinstance(raw_args, dict):
364
+ accumulated[idx]["arguments"] = raw_args
365
+ else:
366
+ accumulated[idx]["arguments"] += raw_args
367
+
368
+
369
def _process_tool_calls(
    accumulated: dict[int, dict],
    messages: list[dict],
    dispatch: dict[str, Any],
) -> None:
    """Run every accumulated tool call and append the results to *messages*."""
    for call in accumulated.values():
        tool_name = call["name"]
        raw_args = call["arguments"]
        args: dict[str, Any]
        if isinstance(raw_args, dict):
            args = raw_args
        else:
            args = {}
            if raw_args:
                try:
                    args = json.loads(raw_args)
                except json.JSONDecodeError as exc:
                    # Fall back to empty args but surface the parse failure.
                    console.print(f"[red]Tool argument parse error: {exc}[/]")
        console.print(f"\n[bold yellow][Tool Call][/] {tool_name}({args})")
        result = execute_tool(tool_name, args, dispatch)
        console.print(f"[bold cyan][Tool Result][/] {result}\n")
        messages.append({"role": "tool", "content": result, "name": tool_name})
390
+
391
+
392
def _stream_response(stream: Any) -> tuple[str, str, dict[int, dict], int]:
    """Consume a chat stream, rendering assistant content live as Markdown.

    Returns (content, thinking, accumulated tool_calls, prompt_eval_count).
    prompt_eval_count is only provided on the final done=True chunk and may
    be missing depending on model/server version, in which case it is 0.
    """
    content_parts: list[str] = []
    thinking_parts: list[str] = []
    tool_calls: dict[int, dict] = {}
    prompt_eval_count = 0

    with Live(console=console, refresh_per_second=10) as live:
        for chunk in stream:
            msg = chunk.get("message", {})
            if msg.get("thinking"):
                thinking_parts.append(msg["thinking"])
            if msg.get("content"):
                content_parts.append(msg["content"])
                live.update(Markdown("".join(content_parts)))
            _accumulate_tool_calls(msg, tool_calls)
            if chunk.get("done"):
                # Token count arrives only on the last chunk; may be None.
                prompt_eval_count = chunk.get("prompt_eval_count") or 0

    return (
        "".join(content_parts),
        "".join(thinking_parts),
        tool_calls,
        prompt_eval_count,
    )
424
+
425
+
426
+ # ──────────────────────────────────────────
427
+ # Agentic Loop
428
+ # ──────────────────────────────────────────
429
+
430
+
431
def run_agentic_loop(
    messages: list[dict],
    client: Client,
    dispatch: dict[str, Any],
    model: str = "qwen3-coder-next:latest",
    max_iterations: int = MAX_ITERATIONS,
) -> str:
    """Repeatedly call the model until it stops requesting tool calls.

    Mutates *messages* in place (assistant/tool turns, token trimming) so the
    caller's history stays in sync. Returns the final assistant text, or ""
    when *max_iterations* is exhausted before the model finishes.
    """
    final_content = ""
    for _ in range(max_iterations):
        stream = client.chat(
            model=model,
            messages=messages,
            tools=TOOLS,
            stream=True,
        )
        assistant_content, thinking_content, tool_calls, token_count = _stream_response(
            stream
        )
        if thinking_content:
            console.print(
                Panel(thinking_content, title="[dim]Thinking[/]", style="dim")
            )
        messages.append({"role": "assistant", "content": assistant_content})
        if token_count:
            console.print(
                f"[dim][Token] {token_count:,} / 128k "
                f"({'⚠️ ' if token_count > TOKEN_THRESHOLD else ''}used)[/]"
            )
        # BUGFIX: assign via slice so trimming stays visible to the caller's
        # list — rebinding the local made every turn appended after a trim
        # invisible to main() and therefore missing from the saved session.
        messages[:] = trim_by_tokens(messages, token_count)
        if not tool_calls:
            final_content = assistant_content
            break
        _process_tool_calls(tool_calls, messages, dispatch)
    else:
        # BUGFIX: report the actual configured limit instead of a hardcoded "10".
        console.print(f"[bold red][경고] 최대 반복 횟수({max_iterations}) 초과[/]")
    return final_content
468
+
469
+
470
class ConnectionInfo(NamedTuple):
    """Ollama server connection info; serialized and handed to subagents."""

    host: str  # base URL of the Ollama server
    cf_client_id: str  # Cloudflare Access service-token client id
    cf_client_secret: str  # Cloudflare Access service-token client secret
476
+
477
+
478
def _build_full_system_prompt(base: str, memory: SessionMemory) -> str:
    """Append the memory context (when non-empty) to the base system prompt."""
    context = memory.to_context_string()
    if not context:
        return base
    return f"{base}\n\n{context}"
482
+
483
+
484
def _handle_memory_command(user: str, memory: SessionMemory) -> bool:
    """Handle a '/memory' command and return True; False for ordinary input."""
    if not user.startswith("/memory"):
        return False
    parts = user.split(maxsplit=2)
    subcommand = parts[1] if len(parts) > 1 else ""
    payload = parts[2] if len(parts) > 2 else None

    if subcommand == "list":
        entries = memory.all()
        if not entries:
            console.print("[dim]저장된 메모리가 없습니다.[/]")
        for entry in entries:
            suffix = f" [{', '.join(entry.tags)}]" if entry.tags else ""
            console.print(f" [cyan]{entry.id[:8]}[/] {entry.content}{suffix}")
    elif subcommand == "add" and payload is not None:
        # '#tag' tokens become tags and are stripped from the stored content.
        tags = re.findall(r"#(\w+)", payload)
        content = re.sub(r"#\w+", "", payload).strip()
        entry = memory.add(content, tags)
        console.print(f"[green]Memory saved:[/] {entry.id[:8]} — {entry.content}")
    elif subcommand == "search" and payload is not None:
        hits = memory.search(payload)
        if not hits:
            console.print("[dim]검색 결과 없음[/]")
        for entry in hits:
            suffix = f" [{', '.join(entry.tags)}]" if entry.tags else ""
            console.print(f" [cyan]{entry.id[:8]}[/] {entry.content}{suffix}")
    elif subcommand == "clear":
        removed = memory.clear()
        console.print(f"[yellow]Memory cleared ({removed} entries)[/]")
    else:
        console.print(
            "[dim]Usage: /memory add <text> [#tag] | "
            "/memory list | /memory search <q> | /memory clear[/]"
        )
    return True
520
+
521
+
522
def _handle_plan_command(
    user: str,
    client: Client,
    model: str,
    base_prompt: str,
) -> bool:
    """Handle a '/plan <task>' command and return True; False otherwise."""
    prefix = "/plan "
    if not user.startswith(prefix):
        return False
    task = user[len(prefix):].strip()
    if task:
        run_plan(task, client, model, base_prompt)
    else:
        console.print("[dim]Usage: /plan <task description>[/]")
    return True
537
+
538
+
539
+ def _parse_subagent_input(
540
+ raw: str,
541
+ default_model: str,
542
+ ) -> list[tuple[str, str]]:
543
+ """subagent 커맨드 텍스트를 파싱해 (task, model) 목록을 반환한다.
544
+
545
+ 지원 문법:
546
+ --model <name> task1 | task2 → 전체 동일 모델 지정
547
+ @model task1 | @model2 task2 → 태스크별 개별 모델 지정
548
+ task1 | task2 → default_model 사용
549
+ """
550
+ global_model = default_model
551
+ flag_match = re.match(r"--model\s+(\S+)\s*(.*)", raw, re.DOTALL)
552
+ if flag_match:
553
+ global_model = flag_match.group(1)
554
+ raw = flag_match.group(2).strip()
555
+
556
+ result: list[tuple[str, str]] = []
557
+ for text in (t.strip() for t in raw.split("|") if t.strip()):
558
+ per_match = re.match(r"@(\S+)\s+(.*)", text, re.DOTALL)
559
+ if per_match:
560
+ result.append((per_match.group(2).strip(), per_match.group(1)))
561
+ else:
562
+ result.append((text, global_model))
563
+ return result
564
+
565
+
566
def list_available_models(client: Client) -> set[str] | None:
    """Return the model names available on the Ollama server, or None on error."""
    try:
        return {entry.model for entry in client.list().models}
    except Exception:
        # Server unreachable / protocol error: caller treats None as "unknown".
        return None
573
+
574
+
575
+ def _is_model_available(model: str, available: set[str]) -> bool:
576
+ """태그 포함/생략 둘 다 허용하여 모델 존재 여부를 확인한다.
577
+
578
+ 'llama3' 입력 시 'llama3:latest' 도 검색한다.
579
+ """
580
+ return model in available or f"{model}:latest" in available
581
+
582
+
583
def _handle_subagent_command(
    user: str,
    conn: ConnectionInfo,
    client: Client,
    model: str,
    base_prompt: str,
) -> bool:
    """Handle '/subagent task1 | task2' by running tasks in parallel."""
    prefix = "/subagent "
    if not user.startswith(prefix):
        return False
    parsed = _parse_subagent_input(user[len(prefix):].strip(), model)
    if not parsed:
        console.print(
            "[dim]Usage: /subagent [--model <m>] <task1> | [@<m>] <task2> | ...[/]"
        )
        return True
    # Validate requested models against the server (skip if server unknown).
    available = list_available_models(client)
    if available is not None:
        invalid = [m for _, m in parsed if not _is_model_available(m, available)]
        if invalid:
            console.print(f"[red]알 수 없는 모델: {invalid}[/]")
            console.print(f"[dim]사용 가능: {', '.join(sorted(available))}[/]")
            return True
    tasks = []
    for i, (task, task_model) in enumerate(parsed, start=1):
        tasks.append(
            SubagentTask(
                name=f"agent-{i}",
                task=task,
                model=task_model,
                host=conn.host,
                cf_client_id=conn.cf_client_id,
                cf_client_secret=conn.cf_client_secret,
                system_prompt=base_prompt,
            )
        )
    console.print(
        Panel(f"[bold magenta]SUBAGENTS[/] — {len(tasks)}개 병렬 실행", style="magenta")
    )
    for task_obj, (name, result) in zip(tasks, run_subagents(tasks)):
        console.print(
            Panel(result, title=f"[magenta]{name}[/] [dim]{task_obj.model}[/]")
        )
    return True
628
+
629
+
630
def _auto_save_session(messages: list[dict]) -> None:
    """Persist the conversation to .agents/sessions/ under a timestamp name."""
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    target = SESSION_DIR / f"{stamp}.json"
    save_session(messages, target)
    console.print(f"[dim][Session] 저장됨: {target}[/]")
636
+
637
+
638
def main() -> None:
    """Interactive agentic-loop entry point (REPL)."""
    agent_config = load_config()
    console.print(
        f"[dim][Config] model={agent_config.model} | "
        f"mode={agent_config.permission_mode.value} | "
        f"threshold={agent_config.token_threshold:,}[/]"
    )
    conn = ConnectionInfo(
        host="https://ollama.nabee.ai.kr",
        cf_client_id=os.getenv("CF_ACCESS_CLIENT_ID", ""),
        cf_client_secret=os.getenv("CF_ACCESS_CLIENT_SECRET", ""),
    )
    client = Client(
        host=conn.host,
        headers={
            "CF-Access-Client-Id": conn.cf_client_id,
            "CF-Access-Client-Secret": conn.cf_client_secret,
        },
    )
    dispatch = build_dispatch(
        PermissionConfig(
            mode=agent_config.permission_mode,
            deny_patterns=agent_config.deny_patterns,
        )
    )
    base_prompt = build_system_prompt(agent_config.agents_md_path)
    memory = SessionMemory()
    messages: list[dict] = [{"role": "system", "content": base_prompt}]

    while True:
        user_input = input("\nYou: ")
        if user_input.lower() in ("exit", "quit"):
            _auto_save_session(messages)
            break
        if _handle_memory_command(user_input, memory):
            # Memory changed: refresh the system prompt with the new context.
            messages[0]["content"] = _build_full_system_prompt(base_prompt, memory)
            continue
        if _handle_plan_command(user_input, client, agent_config.model, base_prompt):
            continue
        if _handle_subagent_command(
            user_input, conn, client, agent_config.model, base_prompt
        ):
            continue
        messages[0]["content"] = _build_full_system_prompt(base_prompt, memory)
        messages.append({"role": "user", "content": user_input})
        messages = trim_messages(messages)
        console.print("\n[bold green]Agent:[/]")
        run_agentic_loop(
            messages,
            client,
            dispatch,
            model=agent_config.model,
            max_iterations=agent_config.max_iterations,
        )
692
+
693
+
694
# Allow running the agent directly, e.g. `python -m ollaAgent.agent`.
if __name__ == "__main__":
    main()