tlm-cli 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tlm_cli-0.1.0/PKG-INFO +91 -0
- tlm_cli-0.1.0/README.md +69 -0
- tlm_cli-0.1.0/pyproject.toml +40 -0
- tlm_cli-0.1.0/setup.cfg +4 -0
- tlm_cli-0.1.0/tests/test_api_client.py +407 -0
- tlm_cli-0.1.0/tests/test_cache.py +209 -0
- tlm_cli-0.1.0/tests/test_cli.py +244 -0
- tlm_cli-0.1.0/tests/test_engine_api.py +268 -0
- tlm_cli-0.1.0/tests/test_hooks.py +246 -0
- tlm_cli-0.1.0/tests/test_hooks_sync.py +122 -0
- tlm_cli-0.1.0/tests/test_installer.py +303 -0
- tlm_cli-0.1.0/tests/test_integration_server.py +167 -0
- tlm_cli-0.1.0/tests/test_learner.py +551 -0
- tlm_cli-0.1.0/tests/test_learner_api.py +117 -0
- tlm_cli-0.1.0/tests/test_no_anthropic.py +55 -0
- tlm_cli-0.1.0/tests/test_pypi_ready.py +118 -0
- tlm_cli-0.1.0/tests/test_state.py +147 -0
- tlm_cli-0.1.0/tlm/__init__.py +2 -0
- tlm_cli-0.1.0/tlm/api_client.py +260 -0
- tlm_cli-0.1.0/tlm/cache.py +132 -0
- tlm_cli-0.1.0/tlm/cli.py +1202 -0
- tlm_cli-0.1.0/tlm/enforcer.py +421 -0
- tlm_cli-0.1.0/tlm/engine.py +415 -0
- tlm_cli-0.1.0/tlm/hooks.py +348 -0
- tlm_cli-0.1.0/tlm/installer.py +339 -0
- tlm_cli-0.1.0/tlm/learner.py +383 -0
- tlm_cli-0.1.0/tlm/prompts/__init__.py +0 -0
- tlm_cli-0.1.0/tlm/prompts/rules.py +343 -0
- tlm_cli-0.1.0/tlm/state.py +79 -0
- tlm_cli-0.1.0/tlm_cli.egg-info/PKG-INFO +91 -0
- tlm_cli-0.1.0/tlm_cli.egg-info/SOURCES.txt +33 -0
- tlm_cli-0.1.0/tlm_cli.egg-info/dependency_links.txt +1 -0
- tlm_cli-0.1.0/tlm_cli.egg-info/entry_points.txt +2 -0
- tlm_cli-0.1.0/tlm_cli.egg-info/requires.txt +4 -0
- tlm_cli-0.1.0/tlm_cli.egg-info/top_level.txt +1 -0
tlm_cli-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: tlm-cli
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: TLM — AI Tech Lead that enforces TDD, tests, and spec compliance in Claude Code.
|
|
5
|
+
Author-email: TLM <hello@tlm.dev>
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Project-URL: Homepage, https://tlm.dev
|
|
8
|
+
Project-URL: Source, https://github.com/tlm-dev/tlm
|
|
9
|
+
Classifier: Development Status :: 4 - Beta
|
|
10
|
+
Classifier: Programming Language :: Python :: 3
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
15
|
+
Classifier: Topic :: Software Development :: Quality Assurance
|
|
16
|
+
Classifier: Intended Audience :: Developers
|
|
17
|
+
Requires-Python: >=3.9
|
|
18
|
+
Description-Content-Type: text/markdown
|
|
19
|
+
Requires-Dist: httpx>=0.27.0
|
|
20
|
+
Provides-Extra: dev
|
|
21
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
22
|
+
|
|
23
|
+
# TLM — AI Tech Lead for Claude Code
|
|
24
|
+
|
|
25
|
+
> The annoying agent that makes Claude do the right thing.
|
|
26
|
+
|
|
27
|
+
TLM sits inside [Claude Code](https://claude.ai/code) and enforces TDD, tests, and spec compliance — automatically. It interviews you before coding, generates specs, and blocks commits that don't meet your project's quality bar.
|
|
28
|
+
|
|
29
|
+
## Quick Start
|
|
30
|
+
|
|
31
|
+
```bash
|
|
32
|
+
pip install tlm-cli
|
|
33
|
+
tlm signup
|
|
34
|
+
cd your-project
|
|
35
|
+
tlm install
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
That's it. Open Claude Code and start working — TLM activates automatically.
|
|
39
|
+
|
|
40
|
+
## What Happens
|
|
41
|
+
|
|
42
|
+
1. **`tlm signup`** — Create your account (one-time)
|
|
43
|
+
2. **`tlm install`** — Scans your project, generates enforcement rules, you approve them interactively. Installs Claude Code hooks.
|
|
44
|
+
3. **Work in Claude Code** — TLM interviews you before features, enforces TDD, checks spec compliance before commits
|
|
45
|
+
4. **`tlm check`** — Manual quality gate (runs your approved checks mechanically)
|
|
46
|
+
|
|
47
|
+
## How It Works
|
|
48
|
+
|
|
49
|
+
TLM scans your project and asks Claude to figure out your stack — test framework, linter, deploy targets, coverage thresholds. It generates an enforcement config and presents it for your approval:
|
|
50
|
+
|
|
51
|
+
```
|
|
52
|
+
TLM Enforcement Config
|
|
53
|
+
|
|
54
|
+
Quality Checks (3):
|
|
55
|
+
● Tests pass [pre_commit]
|
|
56
|
+
pytest tests/ -v
|
|
57
|
+
● Linting [pre_commit]
|
|
58
|
+
flake8 src/
|
|
59
|
+
○ Security audit [pre_commit]
|
|
60
|
+
pip-audit
|
|
61
|
+
|
|
62
|
+
Does this look right?
|
|
63
|
+
yes — Approve and save
|
|
64
|
+
[correction] — Tell me what's wrong
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
You can correct anything conversationally. Once approved, enforcement is mechanical — commands and exit codes, no LLM opinions.
|
|
68
|
+
|
|
69
|
+
## Commands
|
|
70
|
+
|
|
71
|
+
| Command | What it does |
|
|
72
|
+
|---------|-------------|
|
|
73
|
+
| `tlm signup` | Create account |
|
|
74
|
+
| `tlm auth <key>` | Save API key |
|
|
75
|
+
| `tlm install` | Full setup: scan, approve config, install hooks |
|
|
76
|
+
| `tlm uninstall` | Remove TLM integration (keeps data) |
|
|
77
|
+
| `tlm check` | Run quality gate manually |
|
|
78
|
+
| `tlm status` | Project stats + enforcement status |
|
|
79
|
+
| `tlm learn` | Analyze recent commits for patterns |
|
|
80
|
+
| `tlm learn --all` | Full history analysis |
|
|
81
|
+
|
|
82
|
+
## Philosophy
|
|
83
|
+
|
|
84
|
+
1. **Claude knows your stack.** TLM doesn't hardcode detection for any framework. Claude figures it out.
|
|
85
|
+
2. **You approve everything.** Nothing becomes a rule until you say yes.
|
|
86
|
+
3. **Enforcement is mechanical.** Commands and exit codes. No LLM opinions during enforcement.
|
|
87
|
+
4. **Annoying, not a prison.** Every block has an `OVERRIDE`. But you have to be explicit.
|
|
88
|
+
|
|
89
|
+
## License
|
|
90
|
+
|
|
91
|
+
MIT
|
tlm_cli-0.1.0/README.md
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
# TLM — AI Tech Lead for Claude Code
|
|
2
|
+
|
|
3
|
+
> The annoying agent that makes Claude do the right thing.
|
|
4
|
+
|
|
5
|
+
TLM sits inside [Claude Code](https://claude.ai/code) and enforces TDD, tests, and spec compliance — automatically. It interviews you before coding, generates specs, and blocks commits that don't meet your project's quality bar.
|
|
6
|
+
|
|
7
|
+
## Quick Start
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
pip install tlm-cli
|
|
11
|
+
tlm signup
|
|
12
|
+
cd your-project
|
|
13
|
+
tlm install
|
|
14
|
+
```
|
|
15
|
+
|
|
16
|
+
That's it. Open Claude Code and start working — TLM activates automatically.
|
|
17
|
+
|
|
18
|
+
## What Happens
|
|
19
|
+
|
|
20
|
+
1. **`tlm signup`** — Create your account (one-time)
|
|
21
|
+
2. **`tlm install`** — Scans your project, generates enforcement rules, you approve them interactively. Installs Claude Code hooks.
|
|
22
|
+
3. **Work in Claude Code** — TLM interviews you before features, enforces TDD, checks spec compliance before commits
|
|
23
|
+
4. **`tlm check`** — Manual quality gate (runs your approved checks mechanically)
|
|
24
|
+
|
|
25
|
+
## How It Works
|
|
26
|
+
|
|
27
|
+
TLM scans your project and asks Claude to figure out your stack — test framework, linter, deploy targets, coverage thresholds. It generates an enforcement config and presents it for your approval:
|
|
28
|
+
|
|
29
|
+
```
|
|
30
|
+
TLM Enforcement Config
|
|
31
|
+
|
|
32
|
+
Quality Checks (3):
|
|
33
|
+
● Tests pass [pre_commit]
|
|
34
|
+
pytest tests/ -v
|
|
35
|
+
● Linting [pre_commit]
|
|
36
|
+
flake8 src/
|
|
37
|
+
○ Security audit [pre_commit]
|
|
38
|
+
pip-audit
|
|
39
|
+
|
|
40
|
+
Does this look right?
|
|
41
|
+
yes — Approve and save
|
|
42
|
+
[correction] — Tell me what's wrong
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
You can correct anything conversationally. Once approved, enforcement is mechanical — commands and exit codes, no LLM opinions.
|
|
46
|
+
|
|
47
|
+
## Commands
|
|
48
|
+
|
|
49
|
+
| Command | What it does |
|
|
50
|
+
|---------|-------------|
|
|
51
|
+
| `tlm signup` | Create account |
|
|
52
|
+
| `tlm auth <key>` | Save API key |
|
|
53
|
+
| `tlm install` | Full setup: scan, approve config, install hooks |
|
|
54
|
+
| `tlm uninstall` | Remove TLM integration (keeps data) |
|
|
55
|
+
| `tlm check` | Run quality gate manually |
|
|
56
|
+
| `tlm status` | Project stats + enforcement status |
|
|
57
|
+
| `tlm learn` | Analyze recent commits for patterns |
|
|
58
|
+
| `tlm learn --all` | Full history analysis |
|
|
59
|
+
|
|
60
|
+
## Philosophy
|
|
61
|
+
|
|
62
|
+
1. **Claude knows your stack.** TLM doesn't hardcode detection for any framework. Claude figures it out.
|
|
63
|
+
2. **You approve everything.** Nothing becomes a rule until you say yes.
|
|
64
|
+
3. **Enforcement is mechanical.** Commands and exit codes. No LLM opinions during enforcement.
|
|
65
|
+
4. **Annoying, not a prison.** Every block has an `OVERRIDE`. But you have to be explicit.
|
|
66
|
+
|
|
67
|
+
## License
|
|
68
|
+
|
|
69
|
+
MIT
|
|
tlm_cli-0.1.0/pyproject.toml
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=68.0", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "tlm-cli"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "TLM — AI Tech Lead that enforces TDD, tests, and spec compliance in Claude Code."
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
license = "MIT"
|
|
11
|
+
requires-python = ">=3.9"
|
|
12
|
+
authors = [
|
|
13
|
+
{name = "TLM", email = "hello@tlm.dev"},
|
|
14
|
+
]
|
|
15
|
+
classifiers = [
|
|
16
|
+
"Development Status :: 4 - Beta",
|
|
17
|
+
"Programming Language :: Python :: 3",
|
|
18
|
+
"Programming Language :: Python :: 3.9",
|
|
19
|
+
"Programming Language :: Python :: 3.10",
|
|
20
|
+
"Programming Language :: Python :: 3.11",
|
|
21
|
+
"Programming Language :: Python :: 3.12",
|
|
22
|
+
"Topic :: Software Development :: Quality Assurance",
|
|
23
|
+
"Intended Audience :: Developers",
|
|
24
|
+
]
|
|
25
|
+
dependencies = [
|
|
26
|
+
"httpx>=0.27.0",
|
|
27
|
+
]
|
|
28
|
+
|
|
29
|
+
[project.urls]
|
|
30
|
+
Homepage = "https://tlm.dev"
|
|
31
|
+
Source = "https://github.com/tlm-dev/tlm"
|
|
32
|
+
|
|
33
|
+
[project.scripts]
|
|
34
|
+
tlm = "tlm.cli:main"
|
|
35
|
+
|
|
36
|
+
[project.optional-dependencies]
|
|
37
|
+
dev = ["pytest>=7.0"]
|
|
38
|
+
|
|
39
|
+
[tool.setuptools.packages.find]
|
|
40
|
+
include = ["tlm*"]
|
tlm_cli-0.1.0/tests/test_api_client.py
ADDED
|
@@ -0,0 +1,407 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Tests for TLM API Client — HTTP client for TLM server.
|
|
3
|
+
|
|
4
|
+
Uses unittest.mock to mock httpx calls. Tests all endpoint methods,
|
|
5
|
+
auth header injection, error handling, timeout behavior, and credential management.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
import os
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from unittest.mock import patch, MagicMock, PropertyMock
|
|
12
|
+
|
|
13
|
+
import pytest
|
|
14
|
+
|
|
15
|
+
from tlm.api_client import (
|
|
16
|
+
TLMClient,
|
|
17
|
+
TLMAuthError,
|
|
18
|
+
TLMServerError,
|
|
19
|
+
TLMConnectionError,
|
|
20
|
+
load_credentials,
|
|
21
|
+
save_credentials,
|
|
22
|
+
get_client,
|
|
23
|
+
)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
# ─── Fixtures ─────────────────────────────────────────────────
|
|
27
|
+
|
|
28
|
+
@pytest.fixture
def credentials_dir(tmp_path):
    """Provide a fresh ``.tlm`` directory under pytest's tmp_path for credential files."""
    directory = tmp_path / ".tlm"
    directory.mkdir()
    return directory
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
@pytest.fixture
def mock_response():
    """Factory fixture: build stubbed httpx-style responses.

    The returned callable accepts a status code, an optional JSON payload,
    and optional raw text, and yields a MagicMock that mimics the httpx
    response surface used by the client (``json()``, ``text``,
    ``is_success``, ``raise_for_status``).
    """
    def _build(status_code=200, json_data=None, text=""):
        payload = json_data or {}
        stub = MagicMock()
        stub.status_code = status_code
        stub.json.return_value = payload
        # Fall back to a JSON dump of the payload when no explicit text given.
        stub.text = text or json.dumps(payload)
        stub.is_success = 200 <= status_code < 300
        stub.raise_for_status = MagicMock()
        if status_code >= 400:
            # Error responses raise on raise_for_status(), like real httpx.
            import httpx
            stub.raise_for_status.side_effect = httpx.HTTPStatusError(
                "error", request=MagicMock(), response=stub
            )
        return stub

    return _build
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
@pytest.fixture
def client():
    """A TLMClient pointed at a test base URL with a throwaway API key."""
    return TLMClient(api_key="tlm_sk_test123", base_url="https://api.test.tlm.dev")
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
# ─── Credential Management Tests ─────────────────────────────
|
|
62
|
+
|
|
63
|
+
class TestCredentials:
    """Round-trip and edge-case behavior of save_credentials / load_credentials."""

    def test_save_credentials(self, credentials_dir):
        save_credentials("tlm_sk_abc123", credentials_dir=str(credentials_dir))
        path = credentials_dir / "credentials.json"
        assert path.exists()
        stored = json.loads(path.read_text())
        assert stored["api_key"] == "tlm_sk_abc123"

    def test_load_credentials(self, credentials_dir):
        path = credentials_dir / "credentials.json"
        path.write_text(json.dumps({"api_key": "tlm_sk_loaded"}))
        assert load_credentials(credentials_dir=str(credentials_dir)) == "tlm_sk_loaded"

    def test_load_credentials_missing_file(self, credentials_dir):
        # No credentials.json on disk → None, not an exception.
        assert load_credentials(credentials_dir=str(credentials_dir)) is None

    def test_load_credentials_corrupted(self, credentials_dir):
        # Unparseable JSON is treated the same as missing credentials.
        (credentials_dir / "credentials.json").write_text("not json")
        assert load_credentials(credentials_dir=str(credentials_dir)) is None

    def test_save_credentials_creates_dir(self, tmp_path):
        # Saving must create intermediate directories as needed.
        target = tmp_path / "nonexistent" / ".tlm"
        save_credentials("tlm_sk_new", credentials_dir=str(target))
        assert (target / "credentials.json").exists()

    def test_save_overwrite(self, credentials_dir):
        # A second save replaces the stored key.
        for key in ("tlm_sk_first", "tlm_sk_second"):
            save_credentials(key, credentials_dir=str(credentials_dir))
        assert load_credentials(credentials_dir=str(credentials_dir)) == "tlm_sk_second"

    def test_save_preserves_other_fields(self, credentials_dir):
        # Extra fields (e.g. base_url) survive an api_key update.
        path = credentials_dir / "credentials.json"
        path.write_text(json.dumps({"api_key": "old", "base_url": "https://custom.url"}))
        save_credentials("tlm_sk_new", credentials_dir=str(credentials_dir))
        stored = json.loads(path.read_text())
        assert stored["api_key"] == "tlm_sk_new"
        assert stored["base_url"] == "https://custom.url"
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
# ─── Client Construction Tests ────────────────────────────────
|
|
108
|
+
|
|
109
|
+
class TestClientConstruction:
    """Constructor defaults and the get_client() convenience factory."""

    def test_creates_with_api_key(self):
        instance = TLMClient(api_key="tlm_sk_test")
        assert instance.api_key == "tlm_sk_test"

    def test_default_base_url(self):
        # Without an explicit base_url the module-level default applies.
        from tlm.api_client import DEFAULT_BASE_URL
        instance = TLMClient(api_key="tlm_sk_test")
        assert instance.base_url == DEFAULT_BASE_URL

    def test_custom_base_url(self):
        instance = TLMClient(api_key="tlm_sk_test", base_url="https://custom.api.com")
        assert instance.base_url == "https://custom.api.com"

    def test_get_client_with_saved_credentials(self, credentials_dir):
        save_credentials("tlm_sk_saved", credentials_dir=str(credentials_dir))
        instance = get_client(credentials_dir=str(credentials_dir))
        assert instance is not None
        assert instance.api_key == "tlm_sk_saved"

    def test_get_client_no_credentials(self, credentials_dir):
        # No stored key → factory returns None rather than raising.
        assert get_client(credentials_dir=str(credentials_dir)) is None
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
# ─── Auth Header Tests ────────────────────────────────────────
|
|
135
|
+
|
|
136
|
+
class TestAuthHeaders:
    """Headers produced by TLMClient._headers()."""

    def test_auth_header_included(self, client):
        produced = client._headers()
        assert "Authorization" in produced
        # Bearer scheme with the configured API key.
        assert produced["Authorization"] == "Bearer tlm_sk_test123"

    def test_content_type_json(self, client):
        assert client._headers().get("Content-Type") == "application/json"
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
# ─── Auth Endpoint Tests ─────────────────────────────────────
|
|
148
|
+
|
|
149
|
+
class TestAuthEndpoints:
    """signup / login / me: payload plumbing and 401 handling."""

    @patch("tlm.api_client.httpx")
    def test_signup(self, fake_httpx, client, mock_response):
        payload = {
            "api_key": "tlm_sk_new",
            "user_id": "usr_123",
            "email": "test@example.com",
        }
        fake_httpx.post.return_value = mock_response(200, payload)
        out = client.signup("test@example.com", "password123")
        assert out["api_key"] == "tlm_sk_new"
        fake_httpx.post.assert_called_once()

    @patch("tlm.api_client.httpx")
    def test_login(self, fake_httpx, client, mock_response):
        fake_httpx.post.return_value = mock_response(
            200, {"api_key": "tlm_sk_existing", "user_id": "usr_456"}
        )
        out = client.login("test@example.com", "password123")
        assert out["api_key"] == "tlm_sk_existing"

    @patch("tlm.api_client.httpx")
    def test_me(self, fake_httpx, client, mock_response):
        fake_httpx.get.return_value = mock_response(
            200, {"user_id": "usr_123", "email": "test@example.com", "tier": "free"}
        )
        out = client.me()
        assert out["email"] == "test@example.com"

    @patch("tlm.api_client.httpx")
    def test_auth_error_on_401(self, fake_httpx, client, mock_response):
        # A 401 from the server surfaces as TLMAuthError, not a raw httpx error.
        fake_httpx.get.return_value = mock_response(401, {"detail": "Invalid API key"})
        with pytest.raises(TLMAuthError):
            client.me()
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
# ─── Project Endpoint Tests ──────────────────────────────────
|
|
188
|
+
|
|
189
|
+
class TestProjectEndpoints:
    """Project CRUD: create, list, delete."""

    @patch("tlm.api_client.httpx")
    def test_create_project(self, fake_httpx, client, mock_response):
        fake_httpx.post.return_value = mock_response(
            200,
            {"project_id": "proj_123", "name": "my-app", "fingerprint": "abc123"},
        )
        out = client.create_project("my-app", "abc123")
        assert out["project_id"] == "proj_123"

    @patch("tlm.api_client.httpx")
    def test_list_projects(self, fake_httpx, client, mock_response):
        listing = {
            "projects": [
                {"project_id": "proj_1", "name": "app1"},
                {"project_id": "proj_2", "name": "app2"},
            ]
        }
        fake_httpx.get.return_value = mock_response(200, listing)
        out = client.list_projects()
        assert len(out["projects"]) == 2

    @patch("tlm.api_client.httpx")
    def test_delete_project(self, fake_httpx, client, mock_response):
        fake_httpx.delete.return_value = mock_response(200, {"deleted": True})
        out = client.delete_project("proj_123")
        assert out["deleted"] is True
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
# ─── Intelligence Endpoint Tests ─────────────────────────────
|
|
219
|
+
|
|
220
|
+
class TestIntelligenceEndpoints:
    """Scan, config lifecycle, drift, and compliance endpoints."""

    @patch("tlm.api_client.httpx")
    def test_scan(self, fake_httpx, client, mock_response):
        fake_httpx.post.return_value = mock_response(
            200, {"profile": "## Stack\nPython, FastAPI"}
        )
        out = client.scan("proj_123", "file_tree_here", "samples_here")
        assert "profile" in out

    @patch("tlm.api_client.httpx")
    def test_generate_config(self, fake_httpx, client, mock_response):
        fake_httpx.post.return_value = mock_response(
            200, {"config": {"checks": [], "environments": {}}}
        )
        out = client.generate_config("proj_123", "profile", "tree", "samples")
        assert "config" in out

    @patch("tlm.api_client.httpx")
    def test_update_config(self, fake_httpx, client, mock_response):
        fake_httpx.post.return_value = mock_response(
            200, {"config": {"checks": [{"name": "test"}]}}
        )
        out = client.update_config("proj_123", {"checks": []}, "add tests", "profile")
        assert "config" in out

    @patch("tlm.api_client.httpx")
    def test_approve_config(self, fake_httpx, client, mock_response):
        fake_httpx.post.return_value = mock_response(200, {"approved": True})
        out = client.approve_config("proj_123", {"checks": []})
        assert out["approved"] is True

    @patch("tlm.api_client.httpx")
    def test_check_drift(self, fake_httpx, client, mock_response):
        fake_httpx.post.return_value = mock_response(
            200, {"stale": False, "drifted": False}
        )
        out = client.check_drift("proj_123", {"checks": []}, {"file": "hash"}, {})
        assert out["stale"] is False

    @patch("tlm.api_client.httpx")
    def test_compliance_check(self, fake_httpx, client, mock_response):
        fake_httpx.post.return_value = mock_response(
            200, {"review": "### Verdict: PASS"}
        )
        out = client.compliance_check(
            "proj_123", "spec content", "diff content", "profile", "config"
        )
        assert "review" in out
|
|
267
|
+
|
|
268
|
+
|
|
269
|
+
# ─── Discovery Endpoint Tests ────────────────────────────────
|
|
270
|
+
|
|
271
|
+
class TestDiscoveryEndpoints:
    """Discovery interview flow: start, respond, generate."""

    @patch("tlm.api_client.httpx")
    def test_discovery_start(self, fake_httpx, client, mock_response):
        fake_httpx.post.return_value = mock_response(
            200,
            {"session_id": "disc_123", "response": "Tell me about this feature..."},
        )
        out = client.discovery_start("proj_123", "Add payments")
        assert out["session_id"] == "disc_123"
        assert "response" in out

    @patch("tlm.api_client.httpx")
    def test_discovery_respond(self, fake_httpx, client, mock_response):
        fake_httpx.post.return_value = mock_response(
            200,
            {
                "response": "Great, and what about error handling?",
                "is_complete": False,
            },
        )
        out = client.discovery_respond("proj_123", "disc_123", "Use Stripe for processing")
        assert "response" in out

    @patch("tlm.api_client.httpx")
    def test_discovery_generate(self, fake_httpx, client, mock_response):
        fake_httpx.post.return_value = mock_response(
            200,
            {
                "spec": "# Payment Feature Spec",
                "instructions": "# TLM Engineering Rules",
                "knowledge": "New payment domain knowledge",
            },
        )
        out = client.discovery_generate("proj_123", "disc_123")
        assert "spec" in out
        assert "instructions" in out
|
|
301
|
+
|
|
302
|
+
|
|
303
|
+
# ─── Learning Endpoint Tests ─────────────────────────────────
|
|
304
|
+
|
|
305
|
+
class TestLearningEndpoints:
    """Commit analysis, synthesis, and learning-status endpoints."""

    @patch("tlm.api_client.httpx")
    def test_analyze_commit(self, fake_httpx, client, mock_response):
        fake_httpx.post.return_value = mock_response(
            200,
            {"analysis": {"hash": "abc123", "category": "bug_fix", "planned": False}},
        )
        commit = {
            "hash": "abc123",
            "message": "fix bug",
            "author": "dev",
            "date": "2026-02-15",
            "diff": "diff content",
        }
        out = client.analyze_commit("proj_123", commit)
        assert out["analysis"]["category"] == "bug_fix"

    @patch("tlm.api_client.httpx")
    def test_synthesize(self, fake_httpx, client, mock_response):
        fake_httpx.post.return_value = mock_response(
            200,
            {
                "synthesis": {
                    "total_commits": 5,
                    "spec_accuracy_percent": 80,
                    "interview_improvements": ["Ask about edge cases"],
                }
            },
        )
        out = client.synthesize("proj_123", [{"hash": "a"}])
        assert out["synthesis"]["total_commits"] == 5

    @patch("tlm.api_client.httpx")
    def test_learning_status(self, fake_httpx, client, mock_response):
        fake_httpx.get.return_value = mock_response(
            200, {"unprocessed_count": 3, "latest_synthesis": None}
        )
        out = client.learning_status("proj_123")
        assert out["unprocessed_count"] == 3
|
|
340
|
+
|
|
341
|
+
|
|
342
|
+
# ─── Sync Endpoint Tests ─────────────────────────────────────
|
|
343
|
+
|
|
344
|
+
class TestSyncEndpoint:
    """The bulk sync endpoint, including timeout translation."""

    @patch("tlm.api_client.httpx")
    def test_sync(self, fake_httpx, client, mock_response):
        payload = {
            "knowledge": "# Knowledge Base\n- Uses PostgreSQL",
            "profile": "## Stack\nPython",
            "enforcement_config": {"checks": []},
            "latest_synthesis": None,
            "specs": [],
            "project_lessons": "",
        }
        fake_httpx.get.return_value = mock_response(200, payload)
        out = client.sync("proj_123")
        for key in ("knowledge", "profile", "enforcement_config"):
            assert key in out

    @patch("tlm.api_client.httpx")
    def test_sync_timeout(self, fake_httpx, client):
        # An httpx timeout must be surfaced as the client's connection error.
        import httpx
        fake_httpx.get.side_effect = httpx.TimeoutException("timeout")
        with pytest.raises(TLMConnectionError):
            client.sync("proj_123", timeout=3.0)
|
|
366
|
+
|
|
367
|
+
|
|
368
|
+
# ─── Error Handling Tests ─────────────────────────────────────
|
|
369
|
+
|
|
370
|
+
class TestErrorHandling:
    """HTTP status codes and transport failures map to TLM exception types."""

    @patch("tlm.api_client.httpx")
    def test_401_raises_auth_error(self, fake_httpx, client, mock_response):
        fake_httpx.get.return_value = mock_response(401, {"detail": "Unauthorized"})
        with pytest.raises(TLMAuthError):
            client.me()

    @patch("tlm.api_client.httpx")
    def test_500_raises_server_error(self, fake_httpx, client, mock_response):
        fake_httpx.post.return_value = mock_response(500, {"detail": "Internal error"})
        with pytest.raises(TLMServerError):
            client.scan("proj_123", "tree", "samples")

    @patch("tlm.api_client.httpx")
    def test_connection_error(self, fake_httpx, client):
        import httpx
        fake_httpx.post.side_effect = httpx.ConnectError("Connection refused")
        with pytest.raises(TLMConnectionError):
            client.scan("proj_123", "tree", "samples")

    @patch("tlm.api_client.httpx")
    def test_timeout_error(self, fake_httpx, client):
        import httpx
        fake_httpx.post.side_effect = httpx.TimeoutException("timeout")
        with pytest.raises(TLMConnectionError):
            client.scan("proj_123", "tree", "samples")

    @patch("tlm.api_client.httpx")
    def test_403_raises_auth_error(self, fake_httpx, client, mock_response):
        # 403 is treated like 401: an auth problem, not a generic server error.
        fake_httpx.get.return_value = mock_response(403, {"detail": "Forbidden"})
        with pytest.raises(TLMAuthError):
            client.me()

    @patch("tlm.api_client.httpx")
    def test_422_raises_server_error(self, fake_httpx, client, mock_response):
        fake_httpx.post.return_value = mock_response(422, {"detail": "Validation error"})
        with pytest.raises(TLMServerError):
            client.create_project("bad", "data")