lobesync 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. lobesync-0.1.0/LICENSE +21 -0
  2. lobesync-0.1.0/PKG-INFO +167 -0
  3. lobesync-0.1.0/README.md +152 -0
  4. lobesync-0.1.0/lobesync/__init__.py +0 -0
  5. lobesync-0.1.0/lobesync/agent/__init__.py +0 -0
  6. lobesync-0.1.0/lobesync/agent/graph.py +40 -0
  7. lobesync-0.1.0/lobesync/agent/nodes/__init__.py +0 -0
  8. lobesync-0.1.0/lobesync/agent/nodes/commitment.py +112 -0
  9. lobesync-0.1.0/lobesync/agent/nodes/completion.py +104 -0
  10. lobesync-0.1.0/lobesync/agent/nodes/executor.py +134 -0
  11. lobesync-0.1.0/lobesync/agent/nodes/planner.py +140 -0
  12. lobesync-0.1.0/lobesync/agent/state.py +14 -0
  13. lobesync-0.1.0/lobesync/agent/tools.py +208 -0
  14. lobesync-0.1.0/lobesync/cli/__init__.py +0 -0
  15. lobesync-0.1.0/lobesync/cli/commands.py +118 -0
  16. lobesync-0.1.0/lobesync/config.py +25 -0
  17. lobesync-0.1.0/lobesync/db/__init__.py +0 -0
  18. lobesync-0.1.0/lobesync/db/database.py +26 -0
  19. lobesync-0.1.0/lobesync/db/models.py +100 -0
  20. lobesync-0.1.0/lobesync/db/repos/__init__.py +0 -0
  21. lobesync-0.1.0/lobesync/db/repos/chat_repo.py +227 -0
  22. lobesync-0.1.0/lobesync/db/repos/checklist_repo.py +213 -0
  23. lobesync-0.1.0/lobesync/db/repos/memory_repo.py +191 -0
  24. lobesync-0.1.0/lobesync/db/repos/note_repo.py +122 -0
  25. lobesync-0.1.0/lobesync/db/repos/task_repo.py +217 -0
  26. lobesync-0.1.0/lobesync/exceptions/__init__.py +0 -0
  27. lobesync-0.1.0/lobesync/exceptions/chat_exceptions.py +6 -0
  28. lobesync-0.1.0/lobesync/exceptions/checklist_exceptions.py +10 -0
  29. lobesync-0.1.0/lobesync/exceptions/memory_exceptions.py +2 -0
  30. lobesync-0.1.0/lobesync/exceptions/note_exceptions.py +2 -0
  31. lobesync-0.1.0/lobesync/exceptions/task_exceptions.py +2 -0
  32. lobesync-0.1.0/lobesync/main.py +88 -0
  33. lobesync-0.1.0/lobesync/services/__init__.py +0 -0
  34. lobesync-0.1.0/lobesync/services/chat_service.py +169 -0
  35. lobesync-0.1.0/lobesync/services/checklist_service.py +205 -0
  36. lobesync-0.1.0/lobesync/services/memory_service.py +136 -0
  37. lobesync-0.1.0/lobesync/services/note_service.py +104 -0
  38. lobesync-0.1.0/lobesync/services/task_service.py +159 -0
  39. lobesync-0.1.0/lobesync/wizard.py +71 -0
  40. lobesync-0.1.0/lobesync.egg-info/PKG-INFO +167 -0
  41. lobesync-0.1.0/lobesync.egg-info/SOURCES.txt +45 -0
  42. lobesync-0.1.0/lobesync.egg-info/dependency_links.txt +1 -0
  43. lobesync-0.1.0/lobesync.egg-info/entry_points.txt +2 -0
  44. lobesync-0.1.0/lobesync.egg-info/requires.txt +5 -0
  45. lobesync-0.1.0/lobesync.egg-info/top_level.txt +1 -0
  46. lobesync-0.1.0/pyproject.toml +26 -0
  47. lobesync-0.1.0/setup.cfg +4 -0
lobesync-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Chetan Panchal
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,167 @@
1
+ Metadata-Version: 2.4
2
+ Name: lobesync
3
+ Version: 0.1.0
4
+ Summary: Personal AI assistant for tasks, notes, memories, and checklists
5
+ License: MIT
6
+ Requires-Python: >=3.12
7
+ Description-Content-Type: text/markdown
8
+ License-File: LICENSE
9
+ Requires-Dist: anthropic>=0.89.0
10
+ Requires-Dist: langgraph>=1.1.6
11
+ Requires-Dist: sqlmodel>=0.0.38
12
+ Requires-Dist: rich>=14.3.3
13
+ Requires-Dist: python-dotenv>=1.2.2
14
+ Dynamic: license-file
15
+
16
+ # Lobesync
17
+
18
+ A personal AI assistant for your terminal. Manage tasks, notes, memories, and checklists through natural conversation — powered by Claude and built on LangGraph.
19
+
20
+ ```
21
+ Lobesync — Personal AI Assistant
22
+ Type /help for commands · exit to quit
23
+
24
+ You: Create a checklist "Launch" and add tasks for writing tests and updating docs
25
+ Lobesync: Done! Created checklist "Launch" (ID: 1) with two tasks — "Write tests" (ID: 2)
26
+ and "Update docs" (ID: 3), both set to pending.
27
+
28
+ You: Mark write tests as in progress
29
+ Lobesync: Updated "Write tests" to in progress. ✓
30
+ ```
31
+
32
+ ## Features
33
+
34
+ - **Natural language interface** — just talk to it, no commands to memorize
35
+ - **Tasks** with deadlines, statuses, and checklist grouping
36
+ - **Checklists** with items and pending-task guards
37
+ - **Notes** for storing anything
38
+ - **Memories** — the agent proactively remembers personal facts about you across sessions
39
+ - **Session management** — multiple conversations, each with its own history and incremental summary
40
+ - **Streaming responses** with live Markdown rendering
41
+ - **Cost-efficient** — one Haiku call for simple chat turns, two for tool-using turns (planner + completion)
42
+ - **Local SQLite** by default — your data stays on your machine
43
+
44
+ ## Architecture
45
+
46
+ Lobesync uses a [LangGraph](https://github.com/langchain-ai/langgraph) graph with four nodes:
47
+
48
+ ```
49
+ user input
50
+
51
+
52
+ ┌─────────┐ direct response (streamed) ┌────────────┐
53
+ │ Planner │ ──────────────────────────────▶ │ Commitment │ ──▶ END
54
+ └─────────┘ └────────────┘
55
+ │ tool calls needed ▲
56
+ ▼ │
57
+ ┌──────────┐ ┌────────────────┐
58
+ │ Executor │ ────────────────────────────▶ │ Completion │
59
+ └──────────┘ └────────────────┘
60
+ ```
61
+
62
+ - **Planner** — single LLM call (Haiku). Streams a direct response for chat, or calls `make_plan` for data operations. Receives user memories in system prompt (cached) + session summary + last 5 messages.
63
+ - **Executor** — runs the plan. Atomic groups use one session (all-or-nothing). Independent steps run in separate sessions.
64
+ - **Completion** — generates the final natural language response with tool results as context.
65
+ - **Commitment** — saves user message, assistant message, and tool calls to DB. Regenerates session summary every 5 messages.
66
+
67
+ ## Installation
68
+
69
+ ### Using pipx (recommended for CLI tools)
70
+
71
+ ```bash
72
+ pipx install lobesync
73
+ ```
74
+
75
+ ### Using pip
76
+
77
+ ```bash
78
+ pip install lobesync
79
+ ```
80
+
81
+ ### From source
82
+
83
+ ```bash
84
+ git clone https://github.com/panchalchetan618/lobesync
85
+ cd lobesync
86
+ pip install -e .
87
+ ```
88
+
89
+ ## Setup
90
+
91
+ Run `lobesync` for the first time and the setup wizard will guide you through:
92
+
93
+ 1. Entering your [Anthropic API key](https://console.anthropic.com/)
94
+ 2. Choosing a local SQLite database (recommended) or providing your own database URL
95
+
96
+ Config is saved to `~/.lobesync/config.json`. Local database is stored at `~/.lobesync/lobesync.db`.
97
+
98
+ ## Usage
99
+
100
+ ```bash
101
+ lobesync
102
+ ```
103
+
104
+ ### CLI Commands
105
+
106
+ | Command | Description |
107
+ |---|---|
108
+ | `/sessions` | List all chat sessions |
109
+ | `/session new` | Start a new session |
110
+ | `/session new <name>` | Start a new named session |
111
+ | `/session <id>` | Switch to an existing session |
112
+ | `/help` | Show all commands |
113
+ | `exit` | Quit |
114
+
115
+ ### Example interactions
116
+
117
+ ```
118
+ You: Add a task to review PRs by Friday
119
+ You: What are my pending tasks?
120
+ You: Create a note about the deployment process
121
+ You: I prefer concise responses
122
+ You: Mark the PR review task as done
123
+ You: Start a new checklist for the Q2 release
124
+ ```
125
+
126
+ The agent remembers personal facts across sessions — tell it your preferences, goals, or anything relevant and it will factor them in automatically.
127
+
128
+ ## Configuration
129
+
130
+ | Variable | Description |
131
+ |---|---|
132
+ | `ANTHROPIC_API_KEY` | Your Anthropic API key |
133
+ | `DATABASE_URL` | SQLAlchemy database URL (default: local SQLite) |
134
+
135
+ Set via the setup wizard or in `~/.lobesync/config.json`:
136
+
137
+ ```json
138
+ {
139
+ "ANTHROPIC_API_KEY": "sk-ant-...",
140
+ "DATABASE_URL": "sqlite:////home/you/.lobesync/lobesync.db"
141
+ }
142
+ ```
143
+
144
+ You can also use a `.env` file in the working directory or environment variables as fallback.
145
+
146
+ ## Models used
147
+
148
+ | Node | Model | Reason |
149
+ |---|---|---|
150
+ | Planner | `claude-haiku-4-5` | Fast, cheap, handles planning and direct chat |
151
+ | Completion | `claude-haiku-4-5` | Generates final response from tool results |
152
+ | Summarizer | `claude-haiku-4-5` | Compresses old conversation history |
153
+
154
+ ## Tech stack
155
+
156
+ - [Anthropic Python SDK](https://github.com/anthropics/anthropic-sdk-python)
157
+ - [LangGraph](https://github.com/langchain-ai/langgraph)
158
+ - [SQLModel](https://sqlmodel.tiangolo.com/)
159
+ - [Rich](https://github.com/Textualize/rich)
160
+
161
+ ## Contributing
162
+
163
+ Pull requests are welcome. For major changes, open an issue first.
164
+
165
+ ## License
166
+
167
+ MIT
@@ -0,0 +1,152 @@
1
+ # Lobesync
2
+
3
+ A personal AI assistant for your terminal. Manage tasks, notes, memories, and checklists through natural conversation — powered by Claude and built on LangGraph.
4
+
5
+ ```
6
+ Lobesync — Personal AI Assistant
7
+ Type /help for commands · exit to quit
8
+
9
+ You: Create a checklist "Launch" and add tasks for writing tests and updating docs
10
+ Lobesync: Done! Created checklist "Launch" (ID: 1) with two tasks — "Write tests" (ID: 2)
11
+ and "Update docs" (ID: 3), both set to pending.
12
+
13
+ You: Mark write tests as in progress
14
+ Lobesync: Updated "Write tests" to in progress. ✓
15
+ ```
16
+
17
+ ## Features
18
+
19
+ - **Natural language interface** — just talk to it, no commands to memorize
20
+ - **Tasks** with deadlines, statuses, and checklist grouping
21
+ - **Checklists** with items and pending-task guards
22
+ - **Notes** for storing anything
23
+ - **Memories** — the agent proactively remembers personal facts about you across sessions
24
+ - **Session management** — multiple conversations, each with its own history and incremental summary
25
+ - **Streaming responses** with live Markdown rendering
26
+ - **Cost-efficient** — single LLM call per turn (planner + executor + completion)
27
+ - **Local SQLite** by default — your data stays on your machine
28
+
29
+ ## Architecture
30
+
31
+ Lobesync uses a [LangGraph](https://github.com/langchain-ai/langgraph) graph with four nodes:
32
+
33
+ ```
34
+ user input
35
+
36
+
37
+ ┌─────────┐ direct response (streamed) ┌────────────┐
38
+ │ Planner │ ──────────────────────────────▶ │ Commitment │ ──▶ END
39
+ └─────────┘ └────────────┘
40
+ │ tool calls needed ▲
41
+ ▼ │
42
+ ┌──────────┐ ┌────────────────┐
43
+ │ Executor │ ────────────────────────────▶ │ Completion │
44
+ └──────────┘ └────────────────┘
45
+ ```
46
+
47
+ - **Planner** — single LLM call (Haiku). Streams a direct response for chat, or calls `make_plan` for data operations. Receives user memories in system prompt (cached) + session summary + last 5 messages.
48
+ - **Executor** — runs the plan. Atomic groups use one session (all-or-nothing). Independent steps run in separate sessions.
49
+ - **Completion** — generates the final natural language response with tool results as context.
50
+ - **Commitment** — saves user message, assistant message, and tool calls to DB. Regenerates session summary every 5 messages.
51
+
52
+ ## Installation
53
+
54
+ ### Using pipx (recommended for CLI tools)
55
+
56
+ ```bash
57
+ pipx install lobesync
58
+ ```
59
+
60
+ ### Using pip
61
+
62
+ ```bash
63
+ pip install lobesync
64
+ ```
65
+
66
+ ### From source
67
+
68
+ ```bash
69
+ git clone https://github.com/panchalchetan618/lobesync
70
+ cd lobesync
71
+ pip install -e .
72
+ ```
73
+
74
+ ## Setup
75
+
76
+ Run `lobesync` for the first time and the setup wizard will guide you through:
77
+
78
+ 1. Entering your [Anthropic API key](https://console.anthropic.com/)
79
+ 2. Choosing a local SQLite database (recommended) or providing your own database URL
80
+
81
+ Config is saved to `~/.lobesync/config.json`. Local database is stored at `~/.lobesync/lobesync.db`.
82
+
83
+ ## Usage
84
+
85
+ ```bash
86
+ lobesync
87
+ ```
88
+
89
+ ### CLI Commands
90
+
91
+ | Command | Description |
92
+ |---|---|
93
+ | `/sessions` | List all chat sessions |
94
+ | `/session new` | Start a new session |
95
+ | `/session new <name>` | Start a new named session |
96
+ | `/session <id>` | Switch to an existing session |
97
+ | `/help` | Show all commands |
98
+ | `exit` | Quit |
99
+
100
+ ### Example interactions
101
+
102
+ ```
103
+ You: Add a task to review PRs by Friday
104
+ You: What are my pending tasks?
105
+ You: Create a note about the deployment process
106
+ You: I prefer concise responses
107
+ You: Mark the PR review task as done
108
+ You: Start a new checklist for the Q2 release
109
+ ```
110
+
111
+ The agent remembers personal facts across sessions — tell it your preferences, goals, or anything relevant and it will factor them in automatically.
112
+
113
+ ## Configuration
114
+
115
+ | Variable | Description |
116
+ |---|---|
117
+ | `ANTHROPIC_API_KEY` | Your Anthropic API key |
118
+ | `DATABASE_URL` | SQLAlchemy database URL (default: local SQLite) |
119
+
120
+ Set via the setup wizard or in `~/.lobesync/config.json`:
121
+
122
+ ```json
123
+ {
124
+ "ANTHROPIC_API_KEY": "sk-ant-...",
125
+ "DATABASE_URL": "sqlite:////home/you/.lobesync/lobesync.db"
126
+ }
127
+ ```
128
+
129
+ You can also use a `.env` file in the working directory or environment variables as fallback.
130
+
131
+ ## Models used
132
+
133
+ | Node | Model | Reason |
134
+ |---|---|---|
135
+ | Planner | `claude-haiku-4-5` | Fast, cheap, handles planning and direct chat |
136
+ | Completion | `claude-haiku-4-5` | Generates final response from tool results |
137
+ | Summarizer | `claude-haiku-4-5` | Compresses old conversation history |
138
+
139
+ ## Tech stack
140
+
141
+ - [Anthropic Python SDK](https://github.com/anthropics/anthropic-sdk-python)
142
+ - [LangGraph](https://github.com/langchain-ai/langgraph)
143
+ - [SQLModel](https://sqlmodel.tiangolo.com/)
144
+ - [Rich](https://github.com/Textualize/rich)
145
+
146
+ ## Contributing
147
+
148
+ Pull requests are welcome. For major changes, open an issue first.
149
+
150
+ ## License
151
+
152
+ MIT
File without changes
File without changes
@@ -0,0 +1,40 @@
1
+ from langgraph.graph import StateGraph, START, END
2
+
3
+ from lobesync.agent.state import AgentState
4
+ from lobesync.agent.nodes.planner import planner_node
5
+ from lobesync.agent.nodes.executor import executor_node
6
+ from lobesync.agent.nodes.completion import completion_node
7
+ from lobesync.agent.nodes.commitment import commitment_node
8
+
9
+
10
+ def _route_after_planner(state: AgentState) -> str:
11
+ if state.get("final_response"):
12
+ return "commitment"
13
+ plan = state.get("plan") or {}
14
+ has_work = bool(plan.get("atomic_groups")) or bool(plan.get("non_atomic"))
15
+ return "executor" if has_work else "completion"
16
+
17
+
18
def build_graph():
    """Assemble and compile the four-node agent graph.

    Flow: START -> planner, which routes conditionally to executor
    (tool work), completion (no work), or commitment (direct streamed
    response); executor feeds completion, and every path ends at
    commitment before END.
    """
    graph = StateGraph(AgentState)

    # Register the four nodes.
    for node_name, node_fn in (
        ("planner", planner_node),
        ("executor", executor_node),
        ("completion", completion_node),
        ("commitment", commitment_node),
    ):
        graph.add_node(node_name, node_fn)

    graph.add_edge(START, "planner")
    graph.add_conditional_edges(
        "planner",
        _route_after_planner,
        {target: target for target in ("executor", "completion", "commitment")},
    )
    graph.add_edge("executor", "completion")
    graph.add_edge("completion", "commitment")
    graph.add_edge("commitment", END)

    return graph.compile()
File without changes
@@ -0,0 +1,112 @@
1
+ import anthropic
2
+ import json
3
+ import logging
4
+ from datetime import datetime
5
+ from sqlmodel import Session
6
+
7
+ from lobesync.config import config
8
+ from lobesync.db.database import engine
9
+ from lobesync.db.models import MessageRole
10
+ from lobesync.db.repos.chat_repo import (
11
+ create_message,
12
+ create_tool_call,
13
+ get_messages_by_session,
14
+ update_chat_session_summary,
15
+ get_chat_session_by_id,
16
+ )
17
+ from lobesync.agent.state import AgentState
18
+
19
+ logger = logging.getLogger(__name__)
20
+
21
# Regenerate the session summary every N messages (see commitment_node).
_SUMMARY_EVERY = 5
# Number of most-recent messages always kept verbatim (never compressed
# into the summary).
_KEEP_LAST = 5
23
+
24
+
25
def _generate_summary(existing_summary: str | None, messages_to_compress: list) -> str:
    """Produce an updated conversation summary with a single Haiku call.

    When a previous summary exists, the new exchanges are folded into it;
    otherwise the exchanges are summarized from scratch. Returns the
    model's text output.
    """
    formatted = "\n".join(
        f"{msg.role.value.upper()}: {msg.content}" for msg in messages_to_compress
    )

    # Incremental update when we already have a summary; fresh summary otherwise.
    if existing_summary:
        prompt = f"Previous summary:\n{existing_summary}\n\nNew exchanges to incorporate:\n{formatted}\n\nUpdate the summary concisely."
    else:
        prompt = f"Summarize this conversation concisely:\n{formatted}"

    client = anthropic.Anthropic(api_key=config.ANTHROPIC_API_KEY)
    response = client.messages.create(
        model="claude-haiku-4-5-20251001",
        max_tokens=512,
        messages=[{"role": "user", "content": prompt}],
    )
    return response.content[0].text
43
+
44
+
45
def _update_session_name(session: Session, chat_session_id: int, first_user_message: str):
    """Rename a still-default session after its first user message.

    Only sessions whose name is the default "Lobesync" are renamed; the
    new name is the first 40 characters of the message, trimmed.
    """
    # Local import avoids a module-level import cycle.
    from lobesync.db.models import ChatSession

    chat_session = session.get(ChatSession, chat_session_id)
    if not chat_session or chat_session.name != "Lobesync":
        return
    chat_session.name = first_user_message[:40].strip()
    session.add(chat_session)
51
+
52
+
53
def commitment_node(state: AgentState) -> dict:
    """
    Persists the conversation turn, tool calls, and manages incremental summary.
    Always runs last — owns all DB writes for the turn.
    """
    chat_session_id = state["chat_session_id"]
    user_query = state["user_query"]
    final_response = state["final_response"]
    input_tokens = state.get("input_tokens", 0)
    output_tokens = state.get("output_tokens", 0)
    model_name = state.get("model_name")
    execution_results = state.get("execution_results") or []

    with Session(engine) as session:
        # Persist the user turn first, then the assistant turn with its
        # token usage; order matters so history reads back chronologically.
        create_message(session, chat_session_id, user_query, MessageRole.USER)
        assistant_msg = create_message(
            session,
            chat_session_id,
            final_response,
            MessageRole.AGENT,
            input_tokens=input_tokens,
            output_tokens=output_tokens,
            model_name=model_name,
        )
        # Flush so assistant_msg.id is populated before tool calls link to it.
        session.flush()

        # Save tool calls linked to the assistant message
        if execution_results and assistant_msg:
            # datetimes become ISO strings; anything else non-serializable
            # falls back to str() so a tool result can never break the commit.
            def _safe_json(obj) -> str:
                return json.dumps(obj, default=lambda o: o.isoformat() if isinstance(o, datetime) else str(o))

            for r in execution_results:
                payload = _safe_json(r.get("args", {}))
                # Failed calls store the error text instead of a result payload.
                response = _safe_json(r["result"]) if r.get("result") is not None else (r.get("error") or "")
                create_tool_call(
                    session,
                    message_id=assistant_msg.id,
                    tool_name=r["tool"],
                    payload=payload,
                    response=response,
                )

        all_messages = get_messages_by_session(session, chat_session_id) or []
        total = len(all_messages)

        # First exchange of a session: derive a name from the user's query.
        if total <= 2:
            _update_session_name(session, chat_session_id, user_query)

        # Every _SUMMARY_EVERY messages, compress everything except the
        # last _KEEP_LAST into an incremental summary (extra LLM call).
        if total > _KEEP_LAST and total % _SUMMARY_EVERY == 0:
            messages_to_compress = all_messages[:-_KEEP_LAST]
            chat_session = get_chat_session_by_id(session, chat_session_id)
            existing_summary = chat_session.summary if chat_session else None
            logger.info(f"Regenerating summary ({len(messages_to_compress)} messages to compress)")
            new_summary = _generate_summary(existing_summary, messages_to_compress)
            update_chat_session_summary(session, chat_session_id, new_summary)

        # Single commit for the whole turn: messages, tool calls, rename,
        # and summary all land atomically.
        session.commit()

    logger.info(f"Commitment: saved turn for session {chat_session_id} ({total} total messages)")
    return {}
@@ -0,0 +1,104 @@
1
+ import anthropic
2
+ import logging
3
+ from sqlmodel import Session
4
+ from rich.console import Console
5
+ from rich.markdown import Markdown
6
+ from rich.live import Live
7
+
8
+ from lobesync.config import config
9
+ from lobesync.db.database import engine
10
+ from lobesync.db.models import MessageRole
11
+ from lobesync.db.repos.chat_repo import get_messages_by_session
12
+ from lobesync.agent.state import AgentState
13
+
14
+ console = Console()
15
+
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
# System prompt for the completion node: turns raw tool results into a
# natural-language reply and surfaces item IDs for later reference.
COMPLETION_SYSTEM_PROMPT = """You are Lobesync, a personal AI assistant. You help the user manage tasks, notes, memories, and checklists.

When tool results are provided in the user message, summarize what was done naturally and concisely.
Always mention the ID of any created or retrieved item (e.g. "Task created with ID 3") so the user can reference it later.
If an operation failed, acknowledge it clearly and suggest what the user can do.
If no tools were called, just respond naturally as a helpful assistant."""

# System blocks with ephemeral cache_control so the static prompt is
# prompt-cached across turns instead of re-billed as fresh input tokens.
_COMPLETION_SYSTEM = [
    {
        "type": "text",
        "text": COMPLETION_SYSTEM_PROMPT,
        "cache_control": {"type": "ephemeral"},
    }
]

# Model used for the final response generation.
_COMPLETION_MODEL = "claude-haiku-4-5-20251001"
35
+
36
+
37
def _build_history(messages) -> list[dict]:
    """Convert stored DB messages into Anthropic chat-format dicts."""
    return [
        {
            "role": "user" if msg.role == MessageRole.USER else "assistant",
            "content": msg.content,
        }
        for msg in messages
    ]
+ return history
43
+
44
+
45
+ def _format_results(execution_results: list[dict]) -> str:
46
+ if not execution_results:
47
+ return ""
48
+ lines = []
49
+ for r in execution_results:
50
+ if r["error"]:
51
+ lines.append(f"- {r['tool']}: FAILED — {r['error']}")
52
+ else:
53
+ lines.append(f"- {r['tool']}: SUCCESS — {r['result']}")
54
+ return "\n".join(lines)
55
+
56
+
57
def completion_node(state: AgentState) -> dict:
    """
    Generates the final natural language response.
    Loads conversation history, injects tool results into the last user
    message, and streams the reply with live Markdown rendering.
    Persistence is NOT done here — commitment_node owns all DB writes.
    """
    chat_session_id = state["chat_session_id"]
    user_query = state["user_query"]
    execution_results = state.get("execution_results") or []

    # Read-only DB session: fetch prior messages for conversational context.
    with Session(engine) as session:
        prior_messages = get_messages_by_session(session, chat_session_id) or []
        history = _build_history(prior_messages)

    # Append tool results to the user turn so the model can summarize them.
    user_content = user_query
    results_text = _format_results(execution_results)
    if results_text:
        user_content = f"{user_query}\n\n[Tool results:\n{results_text}]"

    history.append({"role": "user", "content": user_content})

    client = anthropic.Anthropic(api_key=config.ANTHROPIC_API_KEY)

    accumulated = ""
    console.print("\n[bold blue]Lobesync:[/bold blue]")
    # Stream tokens and re-render the accumulated Markdown live as they arrive.
    with client.messages.stream(
        model=_COMPLETION_MODEL,
        max_tokens=1024,
        system=_COMPLETION_SYSTEM,
        messages=history,
    ) as stream:
        with Live(Markdown(accumulated), console=console, refresh_per_second=15) as live:
            for text in stream.text_stream:
                accumulated += text
                live.update(Markdown(accumulated))
        # Final message carries the complete content plus usage accounting.
        final_message = stream.get_final_message()
    console.print()

    final_response = final_message.content[0].text
    input_tokens = final_message.usage.input_tokens
    output_tokens = final_message.usage.output_tokens

    logger.info(f"Completion: {input_tokens} in / {output_tokens} out tokens")
    # Token counts and model name flow to commitment_node for persistence.
    return {
        "final_response": final_response,
        "input_tokens": input_tokens,
        "output_tokens": output_tokens,
        "model_name": _COMPLETION_MODEL,
    }