engram-cli 2.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,165 @@
1
+ Metadata-Version: 2.4
2
+ Name: engram-cli
3
+ Version: 2.1.0
4
+ Summary: AI-powered skill & memory generator for codebases - fully local, no API keys needed
5
+ Author: Engram Team
6
+ License-Expression: MIT
7
+ Keywords: ai,code-analysis,skills,memory,ollama,local-llm
8
+ Classifier: Development Status :: 4 - Beta
9
+ Classifier: Environment :: Console
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: Intended Audience :: Education
12
+ Classifier: Intended Audience :: Science/Research
13
+ Classifier: Programming Language :: Python :: 3
14
+ Classifier: Topic :: Software Development :: Documentation
15
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
16
+ Requires-Python: >=3.10
17
+ Description-Content-Type: text/markdown
18
+ Requires-Dist: click>=8.0
19
+ Requires-Dist: httpx>=0.25
20
+ Requires-Dist: pyyaml>=6.0
21
+ Requires-Dist: rich>=13.0
22
+ Provides-Extra: dev
23
+ Requires-Dist: pytest>=8.0; extra == "dev"
24
+ Requires-Dist: pytest-asyncio>=0.23; extra == "dev"
25
+
26
+ # Engram CLI
27
+
28
+ AI-powered skill & memory generator for codebases. Fully local, no API keys needed.
29
+
30
+ ## What it does
31
+
32
+ Point Engram at any codebase and it generates structured **skills** (architectural knowledge) and **memories** (exploration sessions) using a local AI model. Zero API cost, fully air-gapped.
33
+
34
+ ```
35
+ $ engram analyze fastapi/fastapi
36
+
37
+ ╭────────────────────────────────────────╮
38
+ │ Engram v2.1.0 - Local AI Code Analyzer │
39
+ ╰────────────────────────────────────────╯
40
+
41
+ Phase 1: Heuristic Analysis
42
+ Languages: Python (89%), Markdown (6%), Shell (3%)
43
+ Frameworks: FastAPI, Starlette, Pydantic, Uvicorn, pytest
44
+ Patterns: REST API, Middleware, Documentation site
45
+
46
+ Phase 2: Local Model Inference (qwen2.5-coder:7b)
47
+ [1/5] Generating architecture skill...
48
+ [2/5] Generating patterns skill...
49
+ [3/5] Generating testing skill...
50
+ [4/5] Generating project overview...
51
+ [5/5] Generating activity analysis...
52
+
53
+ ╭───────── Results for fastapi/fastapi ──────────╮
54
+ │ Generated 3 skills + 2 memories │
55
+ │ Model: qwen2.5-coder:7b | Time: 42s | Cost: $0 │
56
+ ╰────────────────────────────────────────────────╯
57
+ ```
58
+
59
+ ## Install
60
+
61
+ ```bash
62
+ # 1. Install Ollama (one-time)
63
+ brew install ollama # macOS
64
+ # or: curl -fsSL https://ollama.com/install.sh | sh # Linux
65
+
66
+ # 2. Install Engram CLI
67
+ pip install engram-cli # from PyPI
68
+ # or from source:
69
+ # pipx install git+https://github.com/engram-hq/engram-cli.git
70
+ ```
71
+
72
+ The first run will automatically download the Qwen2.5-Coder 7B model (~4.5GB, one-time).
73
+
74
+ ## Usage
75
+
76
+ ```bash
77
+ # Analyze current directory
78
+ engram analyze .
79
+
80
+ # Analyze a GitHub repo
81
+ engram analyze https://github.com/pallets/flask
82
+
83
+ # Shorthand
84
+ engram analyze pallets/flask
85
+
86
+ # Specify org name for output
87
+ engram analyze . --org mycompany/myrepo
88
+
89
+ # Use a larger model for better quality
90
+ engram analyze . --model qwen2.5-coder:14b
91
+
92
+ # Heuristic-only (no model, instant)
93
+ engram analyze . --skip-model
94
+
95
+ # JSON output for piping
96
+ engram analyze . --json-only | jq '.skills | length'
97
+
98
+ # List recommended models
99
+ engram models
100
+
101
+ # Browse analysis results in a local web viewer
102
+ engram browse
103
+
104
+ # Start the viewer server without opening browser
105
+ engram serve
106
+
107
+ # Check version
108
+ engram version
109
+ ```
110
+
111
+ ## Output
112
+
113
+ By default, Engram writes both JSON and Markdown output:
114
+
115
+ ```
116
+ engram-output/myrepo/
117
+ ├── engram-analysis.json # Combined analysis + generated content
118
+ ├── skills/
119
+ │ ├── architecture/SKILL.md # Architecture overview
120
+ │ ├── patterns/SKILL.md # Code patterns & conventions
121
+ │ └── testing/SKILL.md # Testing strategy (if tests detected)
122
+ └── memories/
123
+ └── sessions/
124
+ ├── 2026-02-13-myrepo-overview.md # Project deep dive
125
+ └── 2026-02-13-myrepo-activity.md # Recent activity analysis
126
+ ```
127
+
128
+ ## How it works
129
+
130
+ ### Layer 1: Heuristic Analysis (instant, no model)
131
+ - Walks the file tree, counts files/extensions
132
+ - Parses `package.json`, `Cargo.toml`, `go.mod`, `pyproject.toml` for dependencies
133
+ - Detects frameworks (React, FastAPI, Tokio, etc.) from dependency lists
134
+ - Identifies test infrastructure, CI/CD, Docker, K8s configs
135
+ - Extracts git metadata (commits, contributors, dates)
136
+ - Detects architectural patterns from directory structure
137
+
138
+ ### Layer 2: Local Model Inference (~40s per repo)
139
+ - Feeds heuristic context into structured prompts
140
+ - Qwen2.5-Coder 7B generates natural language skills and memories
141
+ - Produces architecture overviews, pattern analysis, testing guides
142
+ - Session memories document the exploration findings
143
+
144
+ ## Models
145
+
146
+ | Model | Size | RAM | Quality | Speed |
147
+ |-------|------|-----|---------|-------|
148
+ | `qwen2.5-coder:7b` (default) | 4.5GB | 8GB | Good | ~30 tok/s |
149
+ | `qwen2.5-coder:14b` | 8.5GB | 16GB | Very Good | ~18 tok/s |
150
+ | `qwen2.5-coder:32b` | 18GB | 24GB | Excellent | ~10 tok/s |
151
+ | `deepseek-coder-v2:16b` | 9GB | 16GB | Very Good | ~18 tok/s |
152
+
153
+ ## Development
154
+
155
+ ```bash
156
+ git clone https://github.com/engram-hq/engram-cli
157
+ cd engram-cli
158
+ python -m venv .venv && source .venv/bin/activate
159
+ pip install ".[dev]"
160
+ pytest tests/ -v
161
+ ```
162
+
163
+ ## License
164
+
165
+ MIT
@@ -0,0 +1,140 @@
1
+ # Engram CLI
2
+
3
+ AI-powered skill & memory generator for codebases. Fully local, no API keys needed.
4
+
5
+ ## What it does
6
+
7
+ Point Engram at any codebase and it generates structured **skills** (architectural knowledge) and **memories** (exploration sessions) using a local AI model. Zero API cost, fully air-gapped.
8
+
9
+ ```
10
+ $ engram analyze fastapi/fastapi
11
+
12
+ ╭────────────────────────────────────────╮
13
+ │ Engram v2.1.0 - Local AI Code Analyzer │
14
+ ╰────────────────────────────────────────╯
15
+
16
+ Phase 1: Heuristic Analysis
17
+ Languages: Python (89%), Markdown (6%), Shell (3%)
18
+ Frameworks: FastAPI, Starlette, Pydantic, Uvicorn, pytest
19
+ Patterns: REST API, Middleware, Documentation site
20
+
21
+ Phase 2: Local Model Inference (qwen2.5-coder:7b)
22
+ [1/5] Generating architecture skill...
23
+ [2/5] Generating patterns skill...
24
+ [3/5] Generating testing skill...
25
+ [4/5] Generating project overview...
26
+ [5/5] Generating activity analysis...
27
+
28
+ ╭───────── Results for fastapi/fastapi ──────────╮
29
+ │ Generated 3 skills + 2 memories │
30
+ │ Model: qwen2.5-coder:7b | Time: 42s | Cost: $0 │
31
+ ╰────────────────────────────────────────────────╯
32
+ ```
33
+
34
+ ## Install
35
+
36
+ ```bash
37
+ # 1. Install Ollama (one-time)
38
+ brew install ollama # macOS
39
+ # or: curl -fsSL https://ollama.com/install.sh | sh # Linux
40
+
41
+ # 2. Install Engram CLI
42
+ pip install engram-cli # from PyPI
43
+ # or from source:
44
+ # pipx install git+https://github.com/engram-hq/engram-cli.git
45
+ ```
46
+
47
+ The first run will automatically download the Qwen2.5-Coder 7B model (~4.5GB, one-time).
48
+
49
+ ## Usage
50
+
51
+ ```bash
52
+ # Analyze current directory
53
+ engram analyze .
54
+
55
+ # Analyze a GitHub repo
56
+ engram analyze https://github.com/pallets/flask
57
+
58
+ # Shorthand
59
+ engram analyze pallets/flask
60
+
61
+ # Specify org name for output
62
+ engram analyze . --org mycompany/myrepo
63
+
64
+ # Use a larger model for better quality
65
+ engram analyze . --model qwen2.5-coder:14b
66
+
67
+ # Heuristic-only (no model, instant)
68
+ engram analyze . --skip-model
69
+
70
+ # JSON output for piping
71
+ engram analyze . --json-only | jq '.skills | length'
72
+
73
+ # List recommended models
74
+ engram models
75
+
76
+ # Browse analysis results in a local web viewer
77
+ engram browse
78
+
79
+ # Start the viewer server without opening browser
80
+ engram serve
81
+
82
+ # Check version
83
+ engram version
84
+ ```
85
+
86
+ ## Output
87
+
88
+ By default, outputs both JSON and Markdown:
89
+
90
+ ```
91
+ engram-output/myrepo/
92
+ ├── engram-analysis.json # Combined analysis + generated content
93
+ ├── skills/
94
+ │ ├── architecture/SKILL.md # Architecture overview
95
+ │ ├── patterns/SKILL.md # Code patterns & conventions
96
+ │ └── testing/SKILL.md # Testing strategy (if tests detected)
97
+ └── memories/
98
+ └── sessions/
99
+ ├── 2026-02-13-myrepo-overview.md # Project deep dive
100
+ └── 2026-02-13-myrepo-activity.md # Recent activity analysis
101
+ ```
102
+
103
+ ## How it works
104
+
105
+ ### Layer 1: Heuristic Analysis (instant, no model)
106
+ - Walks the file tree, counts files/extensions
107
+ - Parses `package.json`, `Cargo.toml`, `go.mod`, `pyproject.toml` for dependencies
108
+ - Detects frameworks (React, FastAPI, Tokio, etc.) from dependency lists
109
+ - Identifies test infrastructure, CI/CD, Docker, K8s configs
110
+ - Extracts git metadata (commits, contributors, dates)
111
+ - Detects architectural patterns from directory structure
112
+
113
+ ### Layer 2: Local Model Inference (~40s per repo)
114
+ - Feeds heuristic context into structured prompts
115
+ - Qwen2.5-Coder 7B generates natural language skills and memories
116
+ - Produces architecture overviews, pattern analysis, testing guides
117
+ - Session memories document the exploration findings
118
+
119
+ ## Models
120
+
121
+ | Model | Size | RAM | Quality | Speed |
122
+ |-------|------|-----|---------|-------|
123
+ | `qwen2.5-coder:7b` (default) | 4.5GB | 8GB | Good | ~30 tok/s |
124
+ | `qwen2.5-coder:14b` | 8.5GB | 16GB | Very Good | ~18 tok/s |
125
+ | `qwen2.5-coder:32b` | 18GB | 24GB | Excellent | ~10 tok/s |
126
+ | `deepseek-coder-v2:16b` | 9GB | 16GB | Very Good | ~18 tok/s |
127
+
128
+ ## Development
129
+
130
+ ```bash
131
+ git clone https://github.com/engram-hq/engram-cli
132
+ cd engram-cli
133
+ python -m venv .venv && source .venv/bin/activate
134
+ pip install ".[dev]"
135
+ pytest tests/ -v
136
+ ```
137
+
138
+ ## License
139
+
140
+ MIT
@@ -0,0 +1,49 @@
1
+ [build-system]
2
+ requires = ["setuptools>=68.0"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "engram-cli"
7
+ version = "2.1.0"
8
+ description = "AI-powered skill & memory generator for codebases - fully local, no API keys needed"
9
+ readme = "README.md"
10
+ license = "MIT"
11
+ requires-python = ">=3.10"
12
+ authors = [{ name = "Engram Team" }]
13
+ keywords = ["ai", "code-analysis", "skills", "memory", "ollama", "local-llm"]
14
+ classifiers = [
15
+ "Development Status :: 4 - Beta",
16
+ "Environment :: Console",
17
+ "Intended Audience :: Developers",
18
+ "Intended Audience :: Education",
19
+ "Intended Audience :: Science/Research",
20
+ "Programming Language :: Python :: 3",
21
+ "Topic :: Software Development :: Documentation",
22
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
23
+ ]
24
+ license-files = []
25
+ dependencies = [
26
+ "click>=8.0",
27
+ "httpx>=0.25",
28
+ "pyyaml>=6.0",
29
+ "rich>=13.0",
30
+ ]
31
+
32
+ [project.optional-dependencies]
33
+ dev = [
34
+ "pytest>=8.0",
35
+ "pytest-asyncio>=0.23",
36
+ ]
37
+
38
+ [project.scripts]
39
+ engram = "engram_cli.main:cli"
40
+
41
+ [tool.setuptools.packages.find]
42
+ where = ["src"]
43
+
44
+ [tool.setuptools.package-data]
45
+ engram_cli = ["templates/*.html"]
46
+
47
+ [tool.pytest.ini_options]
48
+ testpaths = ["tests"]
49
+ pythonpath = ["src"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,3 @@
1
+ """Engram CLI - AI-powered skill & memory generator for codebases."""
2
+
3
+ __version__ = "2.1.0"