quickcall-integrations 0.3.0.tar.gz → 0.3.2.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/PKG-INFO +1 -1
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/tools/github_tools.py +50 -119
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/plugins/quickcall/.claude-plugin/plugin.json +1 -1
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/plugins/quickcall/commands/appraisal.md +22 -13
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/pyproject.toml +1 -1
- quickcall_integrations-0.3.2/tests/test_appraisal_integration.py +235 -0
- quickcall_integrations-0.3.2/tests/test_appraisal_tools.py +416 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/.claude-plugin/marketplace.json +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/.github/workflows/publish-pypi.yml +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/.gitignore +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/.pre-commit-config.yaml +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/Dockerfile +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/README.md +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/assets/logo.png +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/__init__.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/api_clients/__init__.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/api_clients/github_client.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/api_clients/slack_client.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/auth/__init__.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/auth/credentials.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/auth/device_flow.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/resources/__init__.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/resources/slack_resources.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/server.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/tools/__init__.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/tools/auth_tools.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/tools/git_tools.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/tools/slack_tools.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/tools/utility_tools.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/plugins/quickcall/commands/connect-github-pat.md +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/plugins/quickcall/commands/connect.md +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/plugins/quickcall/commands/slack-summary.md +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/plugins/quickcall/commands/status.md +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/plugins/quickcall/commands/updates.md +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/requirements.txt +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/tests/README.md +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/tests/appraisal/__init__.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/tests/appraisal/setup_test_data.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/tests/test_integrations.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/tests/test_tools.py +0 -0
- {quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/uv.lock +0 -0
{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/tools/github_tools.py
RENAMED

@@ -195,40 +195,60 @@ def create_github_tools(mcp: FastMCP) -> None:
             raise ToolError(f"Failed to list pull requests: {str(e)}")

     @mcp.tool(tags={"github", "prs"})
-    def
-
-
-
-
-        ),
-        repo: Optional[str] = Field(
-            default=None,
-            description="Repository name. Required.",
+    def get_prs(
+        pr_refs: List[dict] = Field(
+            ...,
+            description="List of PR references. Each item should have 'owner', 'repo', and 'number' keys. "
+            "Example: [{'owner': 'org', 'repo': 'myrepo', 'number': 123}, ...]",
         ),
     ) -> dict:
         """
-        Get detailed information about
+        Get detailed information about one or more pull requests.
+
+        Works for single or multiple PRs - fetches in parallel when multiple.
+        Each PR ref needs owner, repo, and number.

-
+        Returns full PR details including additions, deletions, and files changed.
         Requires QuickCall authentication with GitHub connected.
         """
         try:
             client = _get_client()
-            pr = client.get_pr(pr_number, owner=owner, repo=repo)

-
-
+            # Validate input
+            validated_refs = []
+            for ref in pr_refs:
+                if not isinstance(ref, dict):
+                    raise ToolError(f"Invalid PR ref (must be dict): {ref}")
+                if "number" not in ref:
+                    raise ToolError(f"Missing 'number' in PR ref: {ref}")
+                if "owner" not in ref or "repo" not in ref:
+                    raise ToolError(
+                        f"Missing 'owner' or 'repo' in PR ref: {ref}. "
+                        "Each ref must have owner, repo, and number."
+                    )
+                validated_refs.append(
+                    {
+                        "owner": ref["owner"],
+                        "repo": ref["repo"],
+                        "number": int(ref["number"]),
+                    }
+                )
+
+            if not validated_refs:
+                return {"count": 0, "prs": []}
+
+            # Fetch all PRs in parallel
+            prs = client.fetch_prs_parallel(validated_refs, max_workers=10)

-            return {
+            return {
+                "count": len(prs),
+                "requested": len(validated_refs),
+                "prs": prs,
+            }
         except ToolError:
             raise
-        except ValueError as e:
-            raise ToolError(
-                f"Repository not specified: {str(e)}. "
-                f"Please provide both owner and repo parameters."
-            )
         except Exception as e:
-            raise ToolError(f"Failed to
+            raise ToolError(f"Failed to fetch PRs: {str(e)}")

     @mcp.tool(tags={"github", "commits"})
     def list_commits(
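
For reference, a minimal sketch (not part of the package diff) of the input shape the new `get_prs` tool expects and how it is normalized, mirroring the validation loop and return shape in the hunk above. The owner/repo values are placeholders, and `validate_pr_refs` is a standalone illustration rather than a function that exists in the package.

from typing import List

def validate_pr_refs(pr_refs: List[dict]) -> List[dict]:
    # Mirrors the validation in get_prs above: every ref must be a dict
    # carrying 'owner', 'repo', and 'number'; 'number' is coerced to int.
    validated = []
    for ref in pr_refs:
        if not isinstance(ref, dict):
            raise ValueError(f"Invalid PR ref (must be dict): {ref}")
        if "owner" not in ref or "repo" not in ref or "number" not in ref:
            raise ValueError(f"Each ref needs owner, repo, and number: {ref}")
        validated.append(
            {"owner": ref["owner"], "repo": ref["repo"], "number": int(ref["number"])}
        )
    return validated

# Hypothetical input; per the hunk above, the tool then fetches these via
# fetch_prs_parallel and returns {"count": ..., "requested": ..., "prs": [...]}.
refs = validate_pr_refs(
    [
        {"owner": "org", "repo": "myrepo", "number": 123},
        {"owner": "org", "repo": "myrepo", "number": "456"},  # string number is coerced
    ]
)
print(refs)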
@@ -373,97 +393,6 @@ def create_github_tools(mcp: FastMCP) -> None:
         except Exception as e:
             raise ToolError(f"Failed to list branches: {str(e)}")

-    @mcp.tool(tags={"github", "prs", "appraisal"})
-    def search_merged_prs(
-        author: Optional[str] = Field(
-            default=None,
-            description="GitHub username to filter by. Defaults to authenticated user if not specified.",
-        ),
-        days: int = Field(
-            default=180,
-            description="Number of days to look back (default: 180 for ~6 months)",
-        ),
-        org: Optional[str] = Field(
-            default=None,
-            description="GitHub org to search within. If not specified, searches all accessible repos.",
-        ),
-        repo: Optional[str] = Field(
-            default=None,
-            description="Specific repo in 'owner/repo' format (e.g., 'revolving-org/supabase'). Overrides org if specified.",
-        ),
-        limit: int = Field(
-            default=100,
-            description="Maximum PRs to return (default: 100)",
-        ),
-        detail_level: str = Field(
-            default="summary",
-            description="'summary' for minimal fields (number, title, merged_at, repo, owner, html_url, author), "
-            "'full' adds body and labels. Use 'summary' for large result sets.",
-        ),
-    ) -> dict:
-        """
-        Search for merged pull requests by author within a time period.
-
-        USE FOR APPRAISALS: This tool is ideal for gathering contribution data
-        for performance reviews. Returns basic PR info - use get_pr for full
-        details (additions, deletions, files) on specific PRs.
-
-        Claude should analyze the returned PRs to:
-
-        1. CATEGORIZE by type (look at PR title/labels):
-           - Features: "feat:", "add:", "implement", "new", "create"
-           - Enhancements: "improve:", "update:", "perf:", "optimize", "enhance"
-           - Bug fixes: "fix:", "bugfix:", "hotfix:", "resolve", "patch"
-           - Chores: "chore:", "docs:", "test:", "ci:", "refactor:", "bump"
-
-        2. IDENTIFY top PRs worth highlighting (call get_pr for detailed metrics)
-
-        3. SUMMARIZE for appraisal with accomplishments grouped by category
-
-        Use detail_level='summary' (default) to avoid context overflow with large result sets.
-        Use get_pr(number) to get full details for specific PRs when needed.
-
-        Requires QuickCall authentication with GitHub connected.
-        """
-        try:
-            client = _get_client()
-
-            # Calculate since_date from days
-            from datetime import datetime, timedelta, timezone
-
-            since_date = (datetime.now(timezone.utc) - timedelta(days=days)).strftime(
-                "%Y-%m-%d"
-            )
-
-            # Use authenticated user if author not specified
-            if not author:
-                creds = get_credential_store().get_api_credentials()
-                if creds and creds.github_username:
-                    author = creds.github_username
-
-            prs = client.search_merged_prs(
-                author=author,
-                since_date=since_date,
-                org=org,
-                repo=repo,
-                limit=limit,
-                detail_level=detail_level,
-            )
-
-            return {
-                "count": len(prs),
-                "detail_level": detail_level,
-                "period": f"Last {days} days",
-                "author": author,
-                "org": org,
-                "repo": repo,
-                "prs": prs,
-            }
-        except ToolError:
-            raise
-        except Exception as e:
-            raise ToolError(f"Failed to search merged PRs: {str(e)}")
-
     @mcp.tool(tags={"github", "prs", "appraisal"})
     def prepare_appraisal_data(
         author: Optional[str] = Field(
@@ -486,14 +415,16 @@ def create_github_tools(mcp: FastMCP) -> None:
         """
         Prepare appraisal data by fetching ALL merged PRs with full details.

-
-
-
-
-
+        USE THIS TOOL FOR APPRAISALS AND PERFORMANCE REVIEWS!
+
+        This is the recommended tool for gathering contribution data because it:
+        1. Fetches ALL merged PRs with full stats (additions, deletions) in PARALLEL
+        2. Dumps everything to a local file (avoids context overflow)
+        3. Returns just PR titles for you to review
+        4. Then use get_appraisal_pr_details(file_path, pr_numbers) for selected PRs

-
-
+        DO NOT use search_merged_prs for appraisals - it doesn't include stats
+        and causes context overflow with large result sets.
         """
         import json
         import tempfile
{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/plugins/quickcall/.claude-plugin/plugin.json
RENAMED

@@ -1,7 +1,7 @@
 {
   "name": "quickcall",
   "description": "Integrate quickcall into dev workflows - eliminate interruptions for developers. Ask about your work, get instant answers. No more context switching.",
-  "version": "0.6.
+  "version": "0.6.1",
   "author": {
     "name": "Sagar Sarkale"
   }
{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/plugins/quickcall/commands/appraisal.md
RENAMED

@@ -16,30 +16,39 @@ Parse `$ARGUMENTS` for time period:

 ## Instructions

+**IMPORTANT:** Only use these two MCP tools for appraisals:
+1. `prepare_appraisal_data` - fetches and dumps all PR data to a temp file
+2. `get_appraisal_pr_details` - reads specific PRs from that file (no API calls)
+
+Do NOT use `get_prs`, `list_prs`, or any other tools - they will overflow context.
+
+---
+
 1. **Gather contribution data:**

-   **Option A - GitHub API (preferred
-   -
-   -
-   -
-   -
+   **Option A - GitHub API (preferred):**
+   - Call `prepare_appraisal_data(days=X)` with the parsed time period
+   - This fetches ALL merged PRs with full stats in PARALLEL
+   - Dumps everything to a temp file (avoids context overflow)
+   - Returns: `file_path` + list of `pr_titles` (number, title, repo only)
+   - Optional: pass `org` or `repo` parameter to filter

-   **Option B - Local Git (fallback
+   **Option B - Local Git (fallback):**
    - Use `get_local_contributions` tool on the current directory
    - This parses local git history for commits by the user
-   - Extracts PR numbers from merge commit messages where available

-2. **Analyze and categorize
-
+2. **Analyze and categorize PRs from the titles:**
+   Review the `pr_titles` list returned in step 1 and categorize:
    - **Features**: New functionality (feat:, add:, implement, new, create)
    - **Enhancements**: Improvements (improve:, update:, perf:, optimize, enhance)
    - **Bug fixes**: (fix:, bugfix:, hotfix:, resolve, patch)
    - **Chores**: Maintenance work (docs:, test:, ci:, chore:, refactor:, bump)

-3. **
-   -
-   -
-   -
+3. **Get details for top accomplishments:**
+   - Pick the top 5-10 significant PRs from the categorized list
+   - Call `get_appraisal_pr_details(file_path, [pr_numbers])`
+   - This reads from the temp file - NO additional API calls
+   - Returns full details: additions, deletions, files, body

 4. **Calculate summary metrics:**
    - Total PRs merged by category
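
Step 2 of the command above categorizes PRs purely from their titles. A small sketch (not part of the package diff) of that heuristic; the keyword buckets come from the command text, while the function itself and the "Other" fallback are illustrative.

def categorize(title: str) -> str:
    # Keyword buckets taken from step 2 of the appraisal command.
    t = title.lower()
    if any(k in t for k in ("feat:", "add:", "implement", "new", "create")):
        return "Features"
    if any(k in t for k in ("improve:", "update:", "perf:", "optimize", "enhance")):
        return "Enhancements"
    if any(k in t for k in ("fix:", "bugfix:", "hotfix:", "resolve", "patch")):
        return "Bug fixes"
    if any(k in t for k in ("docs:", "test:", "ci:", "chore:", "refactor:", "bump")):
        return "Chores"
    return "Other"

print(categorize("feat: add login"))     # Features
print(categorize("fix: bug in auth"))    # Bug fixes
print(categorize("chore: update deps"))  # Chores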
quickcall_integrations-0.3.2/tests/test_appraisal_integration.py

@@ -0,0 +1,235 @@
+#!/usr/bin/env python3
+"""
+Integration test for appraisal tools.
+
+Tests the actual MCP tool functions with real GitHub API calls.
+
+Usage:
+    uv run python tests/test_appraisal_integration.py
+"""
+
+import json
+import os
+import sys
+
+# Add parent dir to path for imports
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+def test_prepare_appraisal_data_tool():
+    """Test prepare_appraisal_data MCP tool with real API."""
+    print("\n=== Integration Test: prepare_appraisal_data ===")
+
+    from mcp_server.auth import get_github_pat, get_credential_store
+    from mcp_server.api_clients.github_client import GitHubClient
+
+    # Check auth
+    pat_token, source = get_github_pat()
+    store = get_credential_store()
+
+    if not pat_token and not store.is_authenticated():
+        print("⚠️ No GitHub auth available, skipping integration test")
+        return None
+
+    # Get client
+    if pat_token:
+        client = GitHubClient(token=pat_token)
+        author = client.get_authenticated_user()
+        print(f"✅ Using PAT ({source}), user: {author}")
+    else:
+        creds = store.get_api_credentials()
+        client = GitHubClient(
+            token=creds.github_token,
+            default_owner=creds.github_username,
+            installation_id=creds.github_installation_id,
+        )
+        author = creds.github_username
+        print(f"✅ Using GitHub App, user: {author}")
+
+    # Step 1: Search merged PRs
+    print("\n[Step 1] Searching merged PRs (last 30 days, limit 10)...")
+    from datetime import datetime, timedelta, timezone
+
+    since_date = (datetime.now(timezone.utc) - timedelta(days=30)).strftime("%Y-%m-%d")
+
+    pr_list = client.search_merged_prs(
+        author=author,
+        since_date=since_date,
+        limit=10,
+        detail_level="full",
+    )
+
+    if not pr_list:
+        print("⚠️ No merged PRs found in last 30 days")
+        return None
+
+    print(f"✅ Found {len(pr_list)} PRs")
+    for pr in pr_list[:3]:
+        print(f"   - #{pr['number']}: {pr['title'][:50]}")
+
+    # Step 2: Fetch full details in parallel
+    print("\n[Step 2] Fetching full PR details in parallel...")
+    pr_refs = [
+        {"owner": pr["owner"], "repo": pr["repo"], "number": pr["number"]}
+        for pr in pr_list
+    ]
+
+    full_prs = client.fetch_prs_parallel(pr_refs, max_workers=5)
+    print(f"✅ Fetched {len(full_prs)} PRs with full details")
+
+    if full_prs:
+        pr = full_prs[0]
+        print(
+            f"   Sample: #{pr['number']} - +{pr.get('additions', 0)}/-{pr.get('deletions', 0)}"
+        )
+
+    # Step 3: Dump to file
+    print("\n[Step 3] Dumping to file...")
+    import tempfile
+
+    dump_data = {
+        "author": author,
+        "period": "Last 30 days",
+        "fetched_at": datetime.now(timezone.utc).isoformat(),
+        "count": len(full_prs),
+        "prs": full_prs,
+    }
+
+    fd, file_path = tempfile.mkstemp(suffix=".json", prefix="appraisal_integration_")
+    with open(file_path, "w") as f:
+        json.dump(dump_data, f, indent=2, default=str)
+
+    file_size = os.path.getsize(file_path)
+    print(f"✅ Dumped to: {file_path}")
+    print(f"   Size: {file_size / 1024:.1f} KB")
+
+    # Step 4: Generate titles (what tool returns)
+    print("\n[Step 4] Generating PR titles for Claude...")
+    pr_titles = [
+        {
+            "number": pr["number"],
+            "title": pr["title"],
+            "repo": f"{pr.get('owner', '')}/{pr.get('repo', '')}",
+        }
+        for pr in full_prs
+    ]
+
+    print(f"✅ Generated {len(pr_titles)} titles")
+    for t in pr_titles[:5]:
+        print(f"   - #{t['number']}: {t['title'][:50]}")
+
+    # Step 5: Test get_appraisal_pr_details
+    print("\n[Step 5] Testing get_appraisal_pr_details...")
+    if len(full_prs) >= 2:
+        selected_numbers = [full_prs[0]["number"], full_prs[1]["number"]]
+
+        with open(file_path) as f:
+            data = json.load(f)
+
+        pr_numbers_set = set(selected_numbers)
+        selected_prs = [
+            pr for pr in data.get("prs", []) if pr["number"] in pr_numbers_set
+        ]
+
+        print(f"✅ Retrieved {len(selected_prs)} selected PRs from dump")
+        for pr in selected_prs:
+            print(
+                f"   - #{pr['number']}: {pr['title'][:40]} (+{pr.get('additions', 0)}/-{pr.get('deletions', 0)})"
+            )
+
+    # Cleanup
+    os.unlink(file_path)
+    print("\n✅ Integration test passed!")
+    return True
+
+
+def test_response_size():
+    """Test that response sizes are reasonable."""
+    print("\n=== Test: Response sizes ===")
+
+    # Simulate 100 PRs
+    mock_pr_titles = [
+        {
+            "number": i,
+            "title": f"PR title for #{i} - some description here",
+            "repo": "org/repo",
+        }
+        for i in range(1, 101)
+    ]
+
+    # Calculate size of titles-only response
+    titles_response = {
+        "file_path": "/tmp/appraisal_xxx.json",
+        "count": 100,
+        "author": "testuser",
+        "period": "Last 180 days",
+        "pr_titles": mock_pr_titles,
+        "next_step": "Call get_appraisal_pr_details...",
+    }
+
+    titles_size = len(json.dumps(titles_response))
+    print(f"✅ Titles-only response for 100 PRs: {titles_size / 1024:.1f} KB")
+
+    # Compare to full response (old way)
+    mock_full_prs = [
+        {
+            "number": i,
+            "title": f"PR title for #{i} - some description here",
+            "body": "This is a longer description " * 10,
+            "owner": "org",
+            "repo": "repo",
+            "additions": 100,
+            "deletions": 50,
+            "changed_files": 10,
+            "labels": ["bug", "feature"],
+            "merged_at": "2024-01-01T00:00:00Z",
+            "html_url": f"https://github.com/org/repo/pull/{i}",
+        }
+        for i in range(1, 101)
+    ]
+
+    full_response = {
+        "count": 100,
+        "prs": mock_full_prs,
+    }
+
+    full_size = len(json.dumps(full_response))
+    print(f"✅ Full response for 100 PRs: {full_size / 1024:.1f} KB")
+    print(f"✅ Reduction: {(1 - titles_size / full_size) * 100:.0f}%")
+
+    assert titles_size < full_size / 2, "Titles response should be <50% of full"
+    print("✅ Test passed!\n")
+    return True
+
+
+if __name__ == "__main__":
+    print("=" * 60)
+    print("Appraisal Tools - Integration Tests")
+    print("=" * 60)
+
+    results = []
+
+    # Test response sizes first (no auth needed)
+    try:
+        results.append(("Response sizes", test_response_size()))
+    except Exception as e:
+        print(f"❌ Failed: {e}")
+        results.append(("Response sizes", False))
+
+    # Test actual tool flow
+    try:
+        result = test_prepare_appraisal_data_tool()
+        results.append(("prepare_appraisal_data", result))
+    except Exception as e:
+        print(f"❌ Failed: {e}")
+        import traceback
+
+        traceback.print_exc()
+        results.append(("prepare_appraisal_data", False))
+
+    print("\n" + "=" * 60)
+    print("Results:")
+    for name, passed in results:
+        status = "✅ PASS" if passed else ("⚠️ SKIP" if passed is None else "❌ FAIL")
+        print(f"   {name}: {status}")
+    print("=" * 60)
quickcall_integrations-0.3.2/tests/test_appraisal_tools.py

@@ -0,0 +1,416 @@
+#!/usr/bin/env python3
+"""
+Test appraisal tools locally.
+
+Tests:
+1. prepare_appraisal_data - fetches PRs in parallel, dumps to file, returns titles
+2. get_appraisal_pr_details - reads from dump file
+
+Usage:
+    uv run python tests/test_appraisal_tools.py
+"""
+
+import json
+import os
+import tempfile
+from datetime import datetime, timezone
+from unittest.mock import MagicMock, patch
+
+# Test the core logic without MCP server
+
+
+def test_prepare_appraisal_data_dumps_to_file():
+    """Test that prepare_appraisal_data creates a file with PR data."""
+    print("\n=== Test 1: prepare_appraisal_data dumps to file ===")
+
+    # Mock data - simulating full PR details from parallel fetch
+    mock_full_prs = [
+        {
+            "number": 1,
+            "title": "feat: add login",
+            "owner": "test-org",
+            "repo": "test-repo",
+            "additions": 100,
+            "deletions": 20,
+            "body": "Added login feature",
+        },
+        {
+            "number": 2,
+            "title": "fix: bug in auth",
+            "owner": "test-org",
+            "repo": "test-repo",
+            "additions": 10,
+            "deletions": 5,
+            "body": "Fixed auth bug",
+        },
+        {
+            "number": 3,
+            "title": "chore: update deps",
+            "owner": "test-org",
+            "repo": "test-repo",
+            "additions": 50,
+            "deletions": 50,
+            "body": "Updated dependencies",
+        },
+    ]
+
+    # Simulate what prepare_appraisal_data does
+    dump_data = {
+        "author": "testuser",
+        "period": "Last 180 days",
+        "org": None,
+        "repo": None,
+        "fetched_at": datetime.now(timezone.utc).isoformat(),
+        "count": len(mock_full_prs),
+        "prs": mock_full_prs,
+    }
+
+    # Create temp file
+    fd, file_path = tempfile.mkstemp(suffix=".json", prefix="appraisal_test_")
+    with open(file_path, "w") as f:
+        json.dump(dump_data, f, indent=2, default=str)
+
+    # Verify file exists and has correct structure
+    assert os.path.exists(file_path), "File should be created"
+
+    with open(file_path) as f:
+        loaded = json.load(f)
+
+    assert loaded["count"] == 3, f"Expected 3 PRs, got {loaded['count']}"
+    assert len(loaded["prs"]) == 3, "Should have 3 PRs in data"
+    assert loaded["prs"][0]["title"] == "feat: add login", "First PR title should match"
+
+    # Generate titles (what prepare_appraisal_data returns)
+    pr_titles = [
+        {
+            "number": pr["number"],
+            "title": pr["title"],
+            "repo": f"{pr['owner']}/{pr['repo']}",
+        }
+        for pr in mock_full_prs
+    ]
+
+    result = {
+        "file_path": file_path,
+        "count": len(mock_full_prs),
+        "author": "testuser",
+        "period": "Last 180 days",
+        "pr_titles": pr_titles,
+        "next_step": "Call get_appraisal_pr_details...",
+    }
+
+    print(f"✅ File created: {file_path}")
+    print(f"✅ PR count: {result['count']}")
+    print(f"✅ PR titles returned: {len(result['pr_titles'])}")
+    for t in result["pr_titles"]:
+        print(f"   - #{t['number']}: {t['title']}")
+
+    # Cleanup
+    os.unlink(file_path)
+    print("✅ Test passed!\n")
+    return True
+
+
+def test_get_appraisal_pr_details_reads_from_file():
+    """Test that get_appraisal_pr_details reads selected PRs from dump."""
+    print("\n=== Test 2: get_appraisal_pr_details reads from file ===")
+
+    # Create a dump file
+    mock_prs = [
+        {
+            "number": 1,
+            "title": "feat: add login",
+            "owner": "test-org",
+            "repo": "test-repo",
+            "additions": 100,
+            "deletions": 20,
+            "body": "Added login feature",
+        },
+        {
+            "number": 2,
+            "title": "fix: bug in auth",
+            "owner": "test-org",
+            "repo": "test-repo",
+            "additions": 10,
+            "deletions": 5,
+            "body": "Fixed auth bug",
+        },
+        {
+            "number": 3,
+            "title": "chore: update deps",
+            "owner": "test-org",
+            "repo": "test-repo",
+            "additions": 50,
+            "deletions": 50,
+            "body": "Updated dependencies",
+        },
+        {
+            "number": 4,
+            "title": "feat: add logout",
+            "owner": "test-org",
+            "repo": "test-repo",
+            "additions": 80,
+            "deletions": 10,
+            "body": "Added logout",
+        },
+        {
+            "number": 5,
+            "title": "docs: update readme",
+            "owner": "test-org",
+            "repo": "test-repo",
+            "additions": 20,
+            "deletions": 5,
+            "body": "Updated docs",
+        },
+    ]
+
+    dump_data = {
+        "author": "testuser",
+        "period": "Last 180 days",
+        "prs": mock_prs,
+    }
+
+    fd, file_path = tempfile.mkstemp(suffix=".json", prefix="appraisal_test_")
+    with open(file_path, "w") as f:
+        json.dump(dump_data, f)
+
+    # Simulate get_appraisal_pr_details - select only PRs 1 and 4
+    selected_numbers = [1, 4]
+    pr_numbers_set = set(selected_numbers)
+
+    with open(file_path) as f:
+        data = json.load(f)
+
+    selected_prs = [pr for pr in data.get("prs", []) if pr["number"] in pr_numbers_set]
+
+    result = {
+        "count": len(selected_prs),
+        "requested": len(selected_numbers),
+        "prs": selected_prs,
+    }
+
+    assert result["count"] == 2, f"Expected 2 PRs, got {result['count']}"
+    assert result["prs"][0]["number"] == 1, "First PR should be #1"
+    assert result["prs"][1]["number"] == 4, "Second PR should be #4"
+    assert result["prs"][0]["additions"] == 100, "Should have full PR details"
+
+    print(f"✅ Requested PRs: {selected_numbers}")
+    print(f"✅ Retrieved {result['count']} PRs from dump")
+    for pr in result["prs"]:
+        print(
+            f"   - #{pr['number']}: {pr['title']} (+{pr['additions']}/-{pr['deletions']})"
+        )
+
+    # Cleanup
+    os.unlink(file_path)
+    print("✅ Test passed!\n")
+    return True
+
+
+def test_parallel_fetch_simulation():
+    """Test that parallel fetching logic works."""
+    print("\n=== Test 3: Parallel fetch simulation ===")
+
+    from concurrent.futures import ThreadPoolExecutor, as_completed
+    import time
+
+    # Simulate fetching PR details
+    def fetch_pr(pr_ref):
+        # Simulate API latency
+        time.sleep(0.1)
+        return {
+            "number": pr_ref["number"],
+            "title": f"PR #{pr_ref['number']}",
+            "owner": pr_ref["owner"],
+            "repo": pr_ref["repo"],
+            "additions": pr_ref["number"] * 10,
+            "deletions": pr_ref["number"] * 2,
+        }
+
+    pr_refs = [
+        {"owner": "test", "repo": "repo", "number": i}
+        for i in range(1, 11)  # 10 PRs
+    ]
+
+    start = time.time()
+
+    # Sequential (for comparison)
+    # sequential_results = [fetch_pr(ref) for ref in pr_refs]
+    # sequential_time = time.time() - start
+
+    # Parallel
+    start = time.time()
+    results = []
+    with ThreadPoolExecutor(max_workers=10) as executor:
+        futures = {executor.submit(fetch_pr, ref): ref for ref in pr_refs}
+        for future in as_completed(futures):
+            result = future.result()
+            if result:
+                results.append(result)
+
+    parallel_time = time.time() - start
+
+    assert len(results) == 10, f"Expected 10 results, got {len(results)}"
+    print(f"✅ Fetched {len(results)} PRs in parallel")
+    print(f"✅ Time: {parallel_time:.2f}s (vs ~1s sequential)")
+    print("✅ Test passed!\n")
+    return True
+
+
+def test_full_flow_with_mock_client():
+    """Test the full appraisal flow with mocked GitHub client."""
+    print("\n=== Test 4: Full flow with mock client ===")
+
+    from mcp_server.api_clients.github_client import GitHubClient
+
+    # Create a mock client
+    with patch.object(GitHubClient, "__init__", lambda self, **kwargs: None):
+        client = GitHubClient(token="fake")
+        client.token = "fake"
+        client._is_pat_mode = True
+        client.default_owner = "testuser"
+
+        # Mock search_merged_prs
+        mock_search_results = [
+            {
+                "number": 1,
+                "title": "feat: add feature",
+                "owner": "org",
+                "repo": "repo",
+                "merged_at": "2024-01-01",
+                "body": "",
+                "labels": [],
+            },
+            {
+                "number": 2,
+                "title": "fix: bug fix",
+                "owner": "org",
+                "repo": "repo",
+                "merged_at": "2024-01-02",
+                "body": "",
+                "labels": [],
+            },
+        ]
+
+        # Mock get_pr to return full details
+        class MockPR:
+            def __init__(self, num):
+                self.number = num
+                self.title = f"PR #{num}"
+                self.body = f"Body for PR #{num}"
+                self.state = "closed"
+                self.additions = num * 100
+                self.deletions = num * 10
+                self.changed_files = num * 5
+                self.commits = num
+                self.draft = False
+                self.mergeable = True
+                self.labels = []
+                self.reviewers = []
+                self.created_at = datetime.now()
+                self.updated_at = datetime.now()
+                self.merged_at = datetime.now()
+                self.html_url = f"https://github.com/org/repo/pull/{num}"
+                self.head_branch = "feature"
+                self.base_branch = "main"
+                self.user = MagicMock(login="testuser")
+
+            def model_dump(self):
+                return {
+                    "number": self.number,
+                    "title": self.title,
+                    "body": self.body,
+                    "additions": self.additions,
+                    "deletions": self.deletions,
+                }
+
+        client.search_merged_prs = MagicMock(return_value=mock_search_results)
+        client.get_pr = MagicMock(side_effect=lambda num, **kw: MockPR(num))
+
+        # Test search
+        search_result = client.search_merged_prs(
+            author="testuser", since_date="2024-01-01"
+        )
+        assert len(search_result) == 2, "Should get 2 PRs from search"
+        print(f"✅ Search returned {len(search_result)} PRs")
+
+        # Test get_pr
+        pr = client.get_pr(1)
+        assert pr.number == 1, "Should get PR #1"
+        assert pr.additions == 100, "Should have additions"
+        print(
+            f"✅ get_pr returned PR #{pr.number} with +{pr.additions}/-{pr.deletions}"
+        )
+
+    print("✅ Test passed!\n")
+    return True
+
+
+def test_actual_github_client():
+    """Test actual GitHub client if credentials available."""
+    print("\n=== Test 5: Actual GitHub client (optional) ===")
+
+    from mcp_server.auth import get_github_pat
+
+    pat_token, source = get_github_pat()
+    if not pat_token:
+        print("⚠️ No GitHub PAT found, skipping live test")
+        print("   Set GITHUB_TOKEN env var to enable this test")
+        return True
+
+    from mcp_server.api_clients.github_client import GitHubClient
+
+    client = GitHubClient(token=pat_token)
+
+    # Test search (limited)
+    print(f"✅ Using PAT from {source}")
+
+    try:
+        prs = client.search_merged_prs(
+            author=client.get_authenticated_user(),
+            since_date="2024-12-01",
+            limit=5,
+            detail_level="summary",
+        )
+        print(f"✅ Found {len(prs)} merged PRs in last month")
+        for pr in prs[:3]:
+            print(f"   - #{pr['number']}: {pr['title'][:50]}")
+    except Exception as e:
+        print(f"⚠️ Search failed: {e}")
+
+    print("✅ Test passed!\n")
+    return True
+
+
+if __name__ == "__main__":
+    print("=" * 60)
+    print("Testing Appraisal Tools")
+    print("=" * 60)
+
+    tests = [
+        test_prepare_appraisal_data_dumps_to_file,
+        test_get_appraisal_pr_details_reads_from_file,
+        test_parallel_fetch_simulation,
+        test_full_flow_with_mock_client,
+        test_actual_github_client,
+    ]
+
+    passed = 0
+    failed = 0
+
+    for test in tests:
+        try:
+            if test():
+                passed += 1
+            else:
+                failed += 1
+        except Exception as e:
+            print(f"❌ Test failed with exception: {e}")
+            import traceback
+
+            traceback.print_exc()
+            failed += 1
+
+    print("=" * 60)
+    print(f"Results: {passed} passed, {failed} failed")
+    print("=" * 60)
{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/.claude-plugin/marketplace.json
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/.github/workflows/publish-pypi.yml
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/.gitignore
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/.pre-commit-config.yaml
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/Dockerfile
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/README.md
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/assets/logo.png
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/__init__.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/api_clients/__init__.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/api_clients/github_client.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/api_clients/slack_client.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/auth/__init__.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/auth/credentials.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/auth/device_flow.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/resources/__init__.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/resources/slack_resources.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/server.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/tools/__init__.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/tools/auth_tools.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/tools/git_tools.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/tools/slack_tools.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/mcp_server/tools/utility_tools.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/plugins/quickcall/commands/connect-github-pat.md
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/plugins/quickcall/commands/connect.md
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/plugins/quickcall/commands/slack-summary.md
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/plugins/quickcall/commands/status.md
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/plugins/quickcall/commands/updates.md
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/requirements.txt
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/tests/README.md
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/tests/appraisal/__init__.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/tests/appraisal/setup_test_data.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/tests/test_integrations.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/tests/test_tools.py
RENAMED
File without changes

{quickcall_integrations-0.3.0 → quickcall_integrations-0.3.2}/uv.lock
RENAMED
File without changes