pylumbergh 0.0.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pylumbergh-0.0.2/.gitignore +50 -0
- pylumbergh-0.0.2/PKG-INFO +12 -0
- pylumbergh-0.0.2/assets/tmux.conf +43 -0
- pylumbergh-0.0.2/lumbergh/__init__.py +1 -0
- pylumbergh-0.0.2/lumbergh/__main__.py +5 -0
- pylumbergh-0.0.2/lumbergh/_version.py +1 -0
- pylumbergh-0.0.2/lumbergh/ai/__init__.py +14 -0
- pylumbergh-0.0.2/lumbergh/ai/prompts.py +191 -0
- pylumbergh-0.0.2/lumbergh/ai/providers.py +246 -0
- pylumbergh-0.0.2/lumbergh/cli.py +23 -0
- pylumbergh-0.0.2/lumbergh/constants.py +51 -0
- pylumbergh-0.0.2/lumbergh/db_utils.py +127 -0
- pylumbergh-0.0.2/lumbergh/diff_cache.py +107 -0
- pylumbergh-0.0.2/lumbergh/file_utils.py +107 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/app-icon.png +0 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/apple-touch-icon.png +0 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/_baseUniq-DccEzysV.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/arc-CFpv56S-.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/architectureDiagram-VXUJARFQ-iaS1TKzN.js +36 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/blockDiagram-VD42YOAC-ovvy1uju.js +122 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/c4Diagram-YG6GDRKO-DF-lI6Fi.js +10 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/channel-CD8Lr1D6.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/chunk-4BX2VUAB-DB5nCt7T.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/chunk-55IACEB6-DhrX1lEp.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/chunk-B4BG7PRW-DUa196Lw.js +165 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/chunk-DI55MBZ5-eBvMJEXy.js +220 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/chunk-FMBD7UC4-ByY0mTx_.js +15 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/chunk-QN33PNHL-BjmUWn42.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/chunk-QZHKN3VN-BUAJy3wE.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/chunk-TZMSLE5B-BbK-mqPy.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/classDiagram-2ON5EDUG-CyzSk4sp.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/classDiagram-v2-WZHVMYZB-CyzSk4sp.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/clone-9AAkVju9.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/cose-bilkent-S5V4N54A-D6epH-Yi.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/cytoscape.esm-5J0xJHOV.js +321 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/dagre-6UL2VRFP-BN8Hk-b6.js +4 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/defaultLocale-DX6XiGOO.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/diagram-PSM6KHXK-BdC5eCHB.js +24 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/diagram-QEK2KX5R-Bkc7XYXY.js +43 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/diagram-S2PKOQOG-KYVv8Nrg.js +24 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/erDiagram-Q2GNP2WA-P84CXrqh.js +60 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/flowDiagram-NV44I4VS-BYUL6kkL.js +162 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/ganttDiagram-JELNMOA3-DeE0h-NA.js +267 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/gitGraphDiagram-NY62KEGX-C6vwFnNw.js +65 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/github-CDab0zVI.css +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/github-dark-Dfs9RUU9.css +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/graph-BAHj05eE.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/index-DD91eJff.js +394 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/index-sFgxihlM.css +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/infoDiagram-WHAUD3N6-Bg800ZVy.js +2 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/init-Gi6I4Gst.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/journeyDiagram-XKPGCS4Q-C89yoAf_.js +139 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/kanban-definition-3W4ZIXB7-DRet19Jc.js +89 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/katex-DhXJpUyf.js +261 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/layout-DZizgGpj.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/linear-C2r4unaV.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/min-B02sjuqh.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/mindmap-definition-VGOIOE7T-4NbTK5Lr.js +68 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/ordinal-Cboi1Yqb.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/pieDiagram-ADFJNKIX-BJmGl4Gn.js +30 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/quadrantDiagram-AYHSOK5B-DJj52z-4.js +7 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/requirementDiagram-UZGBJVZJ-D_H0nuNn.js +64 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/sankeyDiagram-TZEHDZUN-DD_ai1gM.js +10 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/sequenceDiagram-WL72ISMW-A_MTCfZ8.js +145 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/stateDiagram-FKZM4ZOC--3F9vsWe.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/stateDiagram-v2-4FDKWEC3-DV3FtIzY.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/timeline-definition-IT6M3QCI-DEbUHKM6.js +61 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/treemap-KMMF4GRG-DaZ2lXLT.js +128 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/assets/xychartDiagram-PRI3JC2R-BSETIoBb.js +7 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/favicon.png +0 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/favicon.svg +4 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/icon-192.png +0 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/icon-512.png +0 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/index.html +24 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/manifest.webmanifest +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/pwa-192x192.png +0 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/pwa-512x512.png +0 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/registerSW.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/sw.js +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/vite.svg +1 -0
- pylumbergh-0.0.2/lumbergh/frontend_dist/workbox-e1276126.js +1 -0
- pylumbergh-0.0.2/lumbergh/git_utils.py +1694 -0
- pylumbergh-0.0.2/lumbergh/idle_detector.py +228 -0
- pylumbergh-0.0.2/lumbergh/idle_monitor.py +165 -0
- pylumbergh-0.0.2/lumbergh/main.py +403 -0
- pylumbergh-0.0.2/lumbergh/message_buffer.py +58 -0
- pylumbergh-0.0.2/lumbergh/models.py +140 -0
- pylumbergh-0.0.2/lumbergh/routers/__init__.py +1 -0
- pylumbergh-0.0.2/lumbergh/routers/ai.py +226 -0
- pylumbergh-0.0.2/lumbergh/routers/notes.py +45 -0
- pylumbergh-0.0.2/lumbergh/routers/sessions.py +1373 -0
- pylumbergh-0.0.2/lumbergh/routers/settings.py +142 -0
- pylumbergh-0.0.2/lumbergh/routers/shared.py +233 -0
- pylumbergh-0.0.2/lumbergh/routers/tmux.py +88 -0
- pylumbergh-0.0.2/lumbergh/session_manager.py +274 -0
- pylumbergh-0.0.2/lumbergh/tests/__init__.py +1 -0
- pylumbergh-0.0.2/lumbergh/tests/conftest.py +75 -0
- pylumbergh-0.0.2/lumbergh/tests/test_db_utils.py +106 -0
- pylumbergh-0.0.2/lumbergh/tests/test_endpoints.py +85 -0
- pylumbergh-0.0.2/lumbergh/tests/test_git_utils.py +178 -0
- pylumbergh-0.0.2/lumbergh/tmux_pty.py +283 -0
- pylumbergh-0.0.2/pyproject.toml +76 -0
- pylumbergh-0.0.2/start.sh +3 -0
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
# Python
|
|
2
|
+
__pycache__/
|
|
3
|
+
*.py[cod]
|
|
4
|
+
*$py.class
|
|
5
|
+
.venv/
|
|
6
|
+
venv/
|
|
7
|
+
.uv/
|
|
8
|
+
*.egg-info/
|
|
9
|
+
.eggs/
|
|
10
|
+
|
|
11
|
+
# Node
|
|
12
|
+
node_modules/
|
|
13
|
+
dist/
|
|
14
|
+
dev-dist/
|
|
15
|
+
.npm
|
|
16
|
+
|
|
17
|
+
# IDE
|
|
18
|
+
.idea/
|
|
19
|
+
.vscode/
|
|
20
|
+
*.swp
|
|
21
|
+
*.swo
|
|
22
|
+
*~
|
|
23
|
+
|
|
24
|
+
# OS
|
|
25
|
+
.DS_Store
|
|
26
|
+
Thumbs.db
|
|
27
|
+
|
|
28
|
+
# Logs
|
|
29
|
+
*.log
|
|
30
|
+
npm-debug.log*
|
|
31
|
+
|
|
32
|
+
# Environment
|
|
33
|
+
.env
|
|
34
|
+
.env.local
|
|
35
|
+
.env.*.local
|
|
36
|
+
|
|
37
|
+
# Build outputs
|
|
38
|
+
build/
|
|
39
|
+
*.tsbuildinfo
|
|
40
|
+
|
|
41
|
+
# uv
|
|
42
|
+
uv.lock
|
|
43
|
+
|
|
44
|
+
# Playwright
|
|
45
|
+
.playwright/
|
|
46
|
+
.playwright-cli/
|
|
47
|
+
|
|
48
|
+
# Lumbergh package build artifacts
|
|
49
|
+
backend/lumbergh/frontend_dist/
|
|
50
|
+
!backend/lumbergh/frontend_dist/.gitkeep
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: pylumbergh
|
|
3
|
+
Version: 0.0.2
|
|
4
|
+
Summary: Tmux session supervisor backend
|
|
5
|
+
Requires-Python: >=3.11
|
|
6
|
+
Requires-Dist: fastapi
|
|
7
|
+
Requires-Dist: gitpython>=3.1
|
|
8
|
+
Requires-Dist: httpx>=0.27
|
|
9
|
+
Requires-Dist: libtmux
|
|
10
|
+
Requires-Dist: python-multipart
|
|
11
|
+
Requires-Dist: tinydb
|
|
12
|
+
Requires-Dist: uvicorn[standard]
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
# Change prefix from Ctrl-b to Ctrl-a
|
|
2
|
+
unbind C-b
|
|
3
|
+
set-option -g prefix C-a
|
|
4
|
+
bind-key C-a send-prefix
|
|
5
|
+
|
|
6
|
+
# Start windows and panes at 1, not 0
|
|
7
|
+
set -g base-index 1
|
|
8
|
+
setw -g pane-base-index 1
|
|
9
|
+
|
|
10
|
+
# Reload config with Ctrl-a r
|
|
11
|
+
bind r source-file ~/.tmux.conf \; display "Reloaded!"
|
|
12
|
+
|
|
13
|
+
# Status bar at top
|
|
14
|
+
set -g status-position top
|
|
15
|
+
|
|
16
|
+
# Minimal status bar styling
|
|
17
|
+
set -g status-style "bg=#282828,fg=#928374"
|
|
18
|
+
set -g status-left "#[fg=#a89984,bold] #S "
|
|
19
|
+
set -g status-left-length 20
|
|
20
|
+
set -g status-right ""
|
|
21
|
+
set -g window-status-format " #I:#W "
|
|
22
|
+
set -g window-status-current-format "#[fg=#ebdbb2,bg=#504945,bold] #I:#W "
|
|
23
|
+
set -g window-status-separator ""
|
|
24
|
+
|
|
25
|
+
# Rename window with F2
|
|
26
|
+
bind F2 command-prompt -I "#W" "rename-window '%%'"
|
|
27
|
+
|
|
28
|
+
# Move windows left/right with Ctrl-Shift-Left/Right
|
|
29
|
+
bind -n C-S-Left swap-window -t -1\; select-window -t -1
|
|
30
|
+
bind -n C-S-Right swap-window -t +1\; select-window -t +1
|
|
31
|
+
|
|
32
|
+
# Auto renumber windows when one is closed
|
|
33
|
+
set -g renumber-windows on
|
|
34
|
+
|
|
35
|
+
# Mobile/xterm.js scroll support
|
|
36
|
+
set -g mouse on
|
|
37
|
+
|
|
38
|
+
# Prevent copy-mode from exiting on mouse/touch release
|
|
39
|
+
unbind -T copy-mode MouseDragEnd1Pane
|
|
40
|
+
unbind -T copy-mode-vi MouseDragEnd1Pane
|
|
41
|
+
|
|
42
|
+
# Allow scrolling in alternate screen apps (like Claude Code)
|
|
43
|
+
set -ga terminal-overrides ',xterm*:smcup@:rmcup@'
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Lumbergh - AI Session Supervisor."""
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
__version__ = "0.0.2"
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AI module for Lumbergh.
|
|
3
|
+
|
|
4
|
+
Provides provider-agnostic AI completions with support for:
|
|
5
|
+
- Ollama (local)
|
|
6
|
+
- OpenAI
|
|
7
|
+
- Anthropic
|
|
8
|
+
- OpenAI-compatible endpoints
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from lumbergh.ai.prompts import DEFAULT_COMMIT_MESSAGE_PROMPT, get_ai_prompt
|
|
12
|
+
from lumbergh.ai.providers import AIProvider, get_provider
|
|
13
|
+
|
|
14
|
+
__all__ = ["DEFAULT_COMMIT_MESSAGE_PROMPT", "AIProvider", "get_ai_prompt", "get_provider"]
|
|
@@ -0,0 +1,191 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AI prompt template management.
|
|
3
|
+
|
|
4
|
+
Stores and retrieves AI system prompts with support for:
|
|
5
|
+
- Global default prompts
|
|
6
|
+
- Per-project overrides
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
|
|
11
|
+
from lumbergh.db_utils import (
|
|
12
|
+
get_global_db,
|
|
13
|
+
get_project_db,
|
|
14
|
+
get_single_document_items,
|
|
15
|
+
save_single_document_items,
|
|
16
|
+
)
|
|
17
|
+
|
|
18
|
+
# Default prompt for commit message generation
|
|
19
|
+
DEFAULT_COMMIT_MESSAGE_PROMPT = """Generate a git commit message following the Conventional Commits specification.
|
|
20
|
+
|
|
21
|
+
FORMAT:
|
|
22
|
+
<type>(<scope>): <description>
|
|
23
|
+
|
|
24
|
+
[optional body]
|
|
25
|
+
|
|
26
|
+
TYPES (pick one):
|
|
27
|
+
- feat: new feature or capability
|
|
28
|
+
- fix: bug fix
|
|
29
|
+
- refactor: code change that neither fixes a bug nor adds a feature
|
|
30
|
+
- docs: documentation only
|
|
31
|
+
- test: adding or updating tests
|
|
32
|
+
- chore: maintenance tasks (deps, config, build)
|
|
33
|
+
- style: formatting, whitespace (no logic change)
|
|
34
|
+
- perf: performance improvement
|
|
35
|
+
|
|
36
|
+
RULES:
|
|
37
|
+
- Subject line MUST be under 50 characters (hard limit)
|
|
38
|
+
- Use imperative mood: "add" not "added" or "adds"
|
|
39
|
+
- Scope is optional but helpful (e.g., api, ui, auth)
|
|
40
|
+
- No period at end of subject line
|
|
41
|
+
- Body is optional; use for complex changes to explain WHY
|
|
42
|
+
|
|
43
|
+
EXAMPLES:
|
|
44
|
+
- feat(ai): add commit message generation
|
|
45
|
+
- fix: prevent crash on empty diff
|
|
46
|
+
- refactor(providers): extract base class for AI providers
|
|
47
|
+
- chore: update dependencies
|
|
48
|
+
|
|
49
|
+
{{#if user_messages}}
|
|
50
|
+
User instructions that led to these changes:
|
|
51
|
+
{{user_messages}}
|
|
52
|
+
{{/if}}
|
|
53
|
+
|
|
54
|
+
{{#if file_summary}}
|
|
55
|
+
Files changed:
|
|
56
|
+
{{file_summary}}
|
|
57
|
+
{{/if}}
|
|
58
|
+
|
|
59
|
+
Diff:
|
|
60
|
+
```
|
|
61
|
+
{{git_diff}}
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
Respond with ONLY the commit message. No markdown, no explanation."""
|
|
65
|
+
|
|
66
|
+
# Prompt for summarizing a todo into a short status
|
|
67
|
+
STATUS_SUMMARY_PROMPT = """Summarize this task in 2-3 words maximum.
|
|
68
|
+
Examples: "fixing auth", "adding tests", "refactoring API"
|
|
69
|
+
Task: {text}"""
|
|
70
|
+
|
|
71
|
+
# Table name for AI prompts
|
|
72
|
+
AI_PROMPTS_TABLE = "ai_prompts"
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def get_default_ai_prompts() -> list[dict]:
    """Build the list of built-in AI prompt definitions shipped with Lumbergh."""
    commit_message_prompt = {
        "id": "commit_message",
        "task": "commit_message",
        "name": "Default Commit Message",
        "template": DEFAULT_COMMIT_MESSAGE_PROMPT,
        "isDefault": True,
    }
    return [commit_message_prompt]
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def get_global_ai_prompts() -> list[dict]:
    """Return the global AI prompts with built-in defaults merged in.

    Stored prompts win: a built-in default is appended only when no stored
    prompt exists for the same ``task`` identifier.
    """
    table = get_global_db().table(AI_PROMPTS_TABLE)
    stored = get_single_document_items(table)

    covered_tasks = {prompt["task"] for prompt in stored}
    merged = list(stored)
    for default in get_default_ai_prompts():
        if default["task"] not in covered_tasks:
            merged.append(default)
    return merged
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def save_global_ai_prompts(prompts: list[dict]) -> list[dict]:
    """Persist *prompts* to the global config and return the stored items."""
    table = get_global_db().table(AI_PROMPTS_TABLE)
    return save_single_document_items(table, prompts)
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
def get_project_ai_prompts(project_path: Path) -> list[dict]:
    """Return the per-project AI prompt overrides (no defaults merged in)."""
    table = get_project_db(project_path).table(AI_PROMPTS_TABLE)
    return get_single_document_items(table)
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def save_project_ai_prompts(project_path: Path, prompts: list[dict]) -> list[dict]:
    """Persist per-project AI prompt overrides and return the stored items."""
    table = get_project_db(project_path).table(AI_PROMPTS_TABLE)
    return save_single_document_items(table, prompts)
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
def get_ai_prompt(task: str, project_path: Path | None = None) -> str | None:
    """
    Resolve the AI prompt template for *task*.

    Resolution order:
    1. Project-specific override (when project_path is provided)
    2. Global custom prompt
    3. Built-in default (already merged into the global list)

    Args:
        task: The task identifier (e.g., "commit_message")
        project_path: Optional project path for project-specific overrides

    Returns:
        The prompt template string, or None if no prompt matches the task
    """
    prompt_sources: list[list[dict]] = []
    if project_path:
        prompt_sources.append(get_project_ai_prompts(project_path))
    # get_global_ai_prompts() already includes the built-in defaults.
    prompt_sources.append(get_global_ai_prompts())

    for prompts in prompt_sources:
        match = next((p for p in prompts if p.get("task") == task), None)
        if match is not None:
            return match.get("template")
    return None
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
def render_prompt(template: str, variables: dict) -> str:
    """
    Render *template* using a minimal handlebars-like syntax.

    Supported constructs:
    - ``{{#if name}}...{{/if}}`` blocks, kept only when ``variables[name]``
      is truthy (a missing name counts as falsy);
    - ``{{name}}`` placeholders, replaced with ``str(value)``.

    Placeholders with no matching variable are left untouched.

    Args:
        template: The template string containing the constructs above
        variables: Dict of variable names to values

    Returns:
        The rendered prompt string
    """
    import re

    conditional = re.compile(r"\{\{#if\s+(\w+)\}\}(.*?)\{\{/if\}\}", re.DOTALL)

    def expand(match):
        # Keep the block body only when the named variable is truthy.
        return match.group(2) if variables.get(match.group(1)) else ""

    rendered = conditional.sub(expand, template)

    # Plain {{name}} substitution, applied after conditionals are resolved.
    for name, value in variables.items():
        rendered = rendered.replace("{{" + name + "}}", str(value))

    return rendered
|
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AI provider abstraction layer.
|
|
3
|
+
|
|
4
|
+
Supports multiple AI backends with a unified interface.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from abc import ABC, abstractmethod
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
import httpx
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class AIProvider(ABC):
    """Abstract base class for AI providers.

    Concrete providers implement a single-shot text completion plus a cheap
    availability probe; both are async because every provider here talks to
    an HTTP backend.
    """

    @abstractmethod
    async def complete(self, prompt: str) -> str:
        """Generate a completion for the given prompt.

        Returns the model's text response; implementations raise on HTTP
        errors rather than returning error strings.
        """
        ...

    @abstractmethod
    async def health_check(self) -> bool:
        """Check if the provider is available.

        Implementations should never raise — a failed probe returns False.
        """
        ...
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class OllamaProvider(AIProvider):
    """Provider backed by a local Ollama server."""

    def __init__(self, base_url: str = "http://localhost:11434", model: str = "llama3.2"):
        # Normalized so URL f-strings below never produce a double slash.
        self.base_url = base_url.rstrip("/")
        self.model = model

    async def complete(self, prompt: str) -> str:
        """Request a non-streaming completion from /api/generate."""
        payload = {
            "model": self.model,
            "prompt": prompt,
            "stream": False,
        }
        # Generous timeout: local models can be slow, especially on first load.
        async with httpx.AsyncClient(timeout=120.0) as client:
            response = await client.post(f"{self.base_url}/api/generate", json=payload)
            response.raise_for_status()
            return response.json()["response"]

    async def health_check(self) -> bool:
        """Probe /api/tags; any network error or non-200 counts as down."""
        try:
            async with httpx.AsyncClient(timeout=5.0) as client:
                result = await client.get(f"{self.base_url}/api/tags")
                return result.status_code == 200
        except Exception:
            return False

    async def list_models(self) -> list[dict[str, Any]]:
        """List available models from Ollama."""
        async with httpx.AsyncClient(timeout=10.0) as client:
            response = await client.get(f"{self.base_url}/api/tags")
            response.raise_for_status()
            payload = response.json()

        models: list[dict[str, Any]] = []
        for entry in payload.get("models", []):
            details = entry.get("details", {})
            models.append(
                {
                    "name": entry["name"],
                    "size": entry.get("size", 0),
                    "parameter_size": details.get("parameter_size", ""),
                }
            )
        return models
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
class OpenAIProvider(AIProvider):
    """Provider backed by the OpenAI chat-completions API."""

    def __init__(self, api_key: str, model: str = "gpt-4o"):
        self.api_key = api_key
        self.model = model
        self.base_url = "https://api.openai.com/v1"

    async def complete(self, prompt: str) -> str:
        """Send the prompt as a single user message and return the reply text."""
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        body = {
            "model": self.model,
            "messages": [{"role": "user", "content": prompt}],
        }
        async with httpx.AsyncClient(timeout=60.0) as client:
            response = await client.post(
                f"{self.base_url}/chat/completions", headers=headers, json=body
            )
            response.raise_for_status()
            return response.json()["choices"][0]["message"]["content"]

    async def health_check(self) -> bool:
        """Probe /models with the configured key; errors count as down."""
        try:
            async with httpx.AsyncClient(timeout=5.0) as client:
                response = await client.get(
                    f"{self.base_url}/models",
                    headers={"Authorization": f"Bearer {self.api_key}"},
                )
                return response.status_code == 200
        except Exception:
            return False
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
class AnthropicProvider(AIProvider):
    """Provider backed by the Anthropic Messages API."""

    def __init__(self, api_key: str, model: str = "claude-sonnet-4-20250514"):
        self.api_key = api_key
        self.model = model
        self.base_url = "https://api.anthropic.com/v1"

    async def complete(self, prompt: str) -> str:
        """Send the prompt as a single user message and return the reply text."""
        headers = {
            "x-api-key": self.api_key,
            "anthropic-version": "2023-06-01",
            "Content-Type": "application/json",
        }
        body = {
            "model": self.model,
            "max_tokens": 1024,
            "messages": [{"role": "user", "content": prompt}],
        }
        async with httpx.AsyncClient(timeout=60.0) as client:
            response = await client.post(
                f"{self.base_url}/messages", headers=headers, json=body
            )
            response.raise_for_status()
            return response.json()["content"][0]["text"]

    async def health_check(self) -> bool:
        # Anthropic doesn't have a simple health endpoint, so just check if key exists
        return bool(self.api_key)
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
class GoogleAIProvider(AIProvider):
    """Provider backed by the Google AI (Gemini) generateContent API."""

    def __init__(self, api_key: str, model: str = "gemini-3-flash-preview"):
        self.api_key = api_key
        self.model = model
        self.base_url = "https://generativelanguage.googleapis.com/v1beta"

    async def complete(self, prompt: str) -> str:
        """Call generateContent for the configured model and return the text."""
        headers = {
            "x-goog-api-key": self.api_key,
            "Content-Type": "application/json",
        }
        body = {
            "contents": [{"parts": [{"text": prompt}]}],
            # Keep latency down for short utility completions.
            "generationConfig": {"thinkingConfig": {"thinkingLevel": "minimal"}},
        }
        async with httpx.AsyncClient(timeout=120.0) as client:
            response = await client.post(
                f"{self.base_url}/models/{self.model}:generateContent",
                headers=headers,
                json=body,
            )
            response.raise_for_status()
            return response.json()["candidates"][0]["content"]["parts"][0]["text"]

    async def health_check(self) -> bool:
        # Google AI doesn't have a simple health endpoint, so just check if key exists
        return bool(self.api_key)
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
class OpenAICompatibleProvider(AIProvider):
    """OpenAI-compatible API provider (e.g., local vLLM, text-generation-inference)."""

    def __init__(self, base_url: str, api_key: str = "", model: str = "default"):
        # Normalized so URL f-strings below never produce a double slash.
        self.base_url = base_url.rstrip("/")
        self.api_key = api_key
        self.model = model

    def _auth_headers(self) -> dict:
        # The Authorization header is optional: many local servers take no key.
        if self.api_key:
            return {"Authorization": f"Bearer {self.api_key}"}
        return {}

    async def complete(self, prompt: str) -> str:
        """Send the prompt via the chat-completions endpoint and return the reply."""
        headers = {"Content-Type": "application/json", **self._auth_headers()}
        body = {
            "model": self.model,
            "messages": [{"role": "user", "content": prompt}],
        }
        async with httpx.AsyncClient(timeout=120.0) as client:
            response = await client.post(
                f"{self.base_url}/chat/completions", headers=headers, json=body
            )
            response.raise_for_status()
            return response.json()["choices"][0]["message"]["content"]

    async def health_check(self) -> bool:
        """Probe /models; any network error or non-200 counts as down."""
        try:
            async with httpx.AsyncClient(timeout=5.0) as client:
                response = await client.get(
                    f"{self.base_url}/models", headers=self._auth_headers()
                )
                return response.status_code == 200
        except Exception:
            return False
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
def get_provider(ai_settings: dict) -> AIProvider:
    """
    Factory function to get the appropriate AI provider based on settings.

    Args:
        ai_settings: The 'ai' section of the settings dict, containing:
            - provider: str (ollama, openai, anthropic, google, openai_compatible)
            - providers: dict with provider-specific settings

    Returns:
        An AIProvider instance

    Raises:
        ValueError: If the configured provider name is not recognized.
    """
    name = ai_settings.get("provider", "ollama")
    config = ai_settings.get("providers", {}).get(name, {})

    # Lambdas defer construction until a provider is actually selected.
    builders = {
        "ollama": lambda c: OllamaProvider(
            base_url=c.get("baseUrl", "http://localhost:11434"),
            model=c.get("model", "llama3.2"),
        ),
        "openai": lambda c: OpenAIProvider(
            api_key=c.get("apiKey", ""),
            model=c.get("model", "gpt-4o"),
        ),
        "anthropic": lambda c: AnthropicProvider(
            api_key=c.get("apiKey", ""),
            model=c.get("model", "claude-sonnet-4-20250514"),
        ),
        "google": lambda c: GoogleAIProvider(
            api_key=c.get("apiKey", ""),
            model=c.get("model", "gemini-3-flash-preview"),
        ),
        "openai_compatible": lambda c: OpenAICompatibleProvider(
            base_url=c.get("baseUrl", ""),
            api_key=c.get("apiKey", ""),
            model=c.get("model", "default"),
        ),
    }

    builder = builders.get(name)
    if builder is None:
        raise ValueError(f"Unknown AI provider: {name}")
    return builder(config)
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
"""CLI entry point for Lumbergh."""
|
|
2
|
+
|
|
3
|
+
import argparse
|
|
4
|
+
|
|
5
|
+
from lumbergh._version import __version__
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def run():
    """Run the Lumbergh server.

    Parses command-line options and hands off to uvicorn.
    """
    parser = argparse.ArgumentParser(description="Lumbergh - AI Session Supervisor")
    parser.add_argument("--version", action="version", version=f"lumbergh {__version__}")
    parser.add_argument("--host", default="0.0.0.0", help="Host to bind to")
    parser.add_argument("--port", "-p", type=int, default=8420, help="Port to bind to")
    parser.add_argument(
        "--reload", action="store_true", help="Enable auto-reload for development"
    )
    options = parser.parse_args()

    # Deferred import keeps --help/--version fast and avoids importing the
    # server stack just to parse arguments.
    import uvicorn

    uvicorn.run(
        "lumbergh.main:app",
        host=options.host,
        port=options.port,
        reload=options.reload,
    )


if __name__ == "__main__":
    run()
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Shared constants for the Lumbergh backend.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
# Configuration directories — everything Lumbergh persists lives under
# ~/.config/lumbergh.
CONFIG_DIR = Path.home() / ".config" / "lumbergh"
PROJECTS_DIR = CONFIG_DIR / "projects"
SESSIONS_DATA_DIR = CONFIG_DIR / "session_data"
SHARED_DIR = CONFIG_DIR / "shared"

# Ensure directories exist.
# NOTE: these run at import time, so merely importing this module creates
# the config tree on disk.
CONFIG_DIR.mkdir(parents=True, exist_ok=True)
PROJECTS_DIR.mkdir(parents=True, exist_ok=True)
SESSIONS_DATA_DIR.mkdir(parents=True, exist_ok=True)
SHARED_DIR.mkdir(parents=True, exist_ok=True)

# Git status code mapping — single-character porcelain status codes to
# human-readable labels.
GIT_STATUS_MAP = {
    "M": "modified",
    "A": "added",
    "D": "deleted",
    "R": "renamed",
    "C": "copied",
    "U": "unmerged",
    "?": "untracked",
}

# File extension to language mapping for syntax highlighting
EXT_TO_LANGUAGE = {
    ".py": "python",
    ".js": "javascript",
    ".ts": "typescript",
    ".tsx": "tsx",
    ".jsx": "jsx",
    ".json": "json",
    ".md": "markdown",
    ".sh": "bash",
    ".css": "css",
    ".html": "html",
    ".yaml": "yaml",
    ".yml": "yaml",
    ".toml": "toml",
}

# Directories to ignore when listing/searching files
IGNORE_DIRS = {".git", "node_modules", "__pycache__", ".venv", "venv", "dist", "build"}

# Additional directories to skip when searching for git repos
# (superset of IGNORE_DIRS).
REPO_SEARCH_SKIP_DIRS = IGNORE_DIRS | {".cache", ".tox", ".nox"}
|