quickcall-integrations 0.3.1__tar.gz → 0.3.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/PKG-INFO +1 -1
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/tools/github_tools.py +17 -101
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/plugins/quickcall/.claude-plugin/plugin.json +1 -1
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/plugins/quickcall/commands/appraisal.md +19 -13
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/pyproject.toml +1 -1
- quickcall_integrations-0.3.3/tests/test_appraisal_integration.py +235 -0
- quickcall_integrations-0.3.3/tests/test_appraisal_tools.py +416 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/.claude-plugin/marketplace.json +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/.github/workflows/publish-pypi.yml +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/.gitignore +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/.pre-commit-config.yaml +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/Dockerfile +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/README.md +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/assets/logo.png +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/__init__.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/api_clients/__init__.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/api_clients/github_client.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/api_clients/slack_client.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/auth/__init__.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/auth/credentials.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/auth/device_flow.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/resources/__init__.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/resources/slack_resources.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/server.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/tools/__init__.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/tools/auth_tools.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/tools/git_tools.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/tools/slack_tools.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/tools/utility_tools.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/plugins/quickcall/commands/connect-github-pat.md +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/plugins/quickcall/commands/connect.md +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/plugins/quickcall/commands/slack-summary.md +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/plugins/quickcall/commands/status.md +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/plugins/quickcall/commands/updates.md +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/requirements.txt +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/tests/README.md +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/tests/appraisal/__init__.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/tests/appraisal/setup_test_data.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/tests/test_integrations.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/tests/test_tools.py +0 -0
- {quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/uv.lock +0 -0
{quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/mcp_server/tools/github_tools.py
RENAMED

@@ -164,10 +164,8 @@ def create_github_tools(mcp: FastMCP) -> None:
         List pull requests for a GitHub repository.
 
         Returns PRs sorted by last updated.
-
-
-        Use detail_level='summary' (default) to avoid context overflow with large result sets.
-        Use get_pr(number) to get full details for specific PRs when needed.
+        Use detail_level='summary' (default) to avoid context overflow.
+        Use get_prs() to fetch full details for specific PRs.
         """
         try:
             client = _get_client()

@@ -286,10 +284,8 @@ def create_github_tools(mcp: FastMCP) -> None:
         List commits for a GitHub repository.
 
         Returns commits sorted by date (newest first).
-
-
-        Use detail_level='summary' (default) to avoid context overflow with large result sets.
-        Use get_commit(sha) to get full details for specific commits when needed.
+        Use detail_level='summary' (default) to avoid context overflow.
+        Use get_commit(sha) for full details on a specific commit.
         """
         try:
             client = _get_client()

@@ -393,84 +389,6 @@ def create_github_tools(mcp: FastMCP) -> None:
         except Exception as e:
             raise ToolError(f"Failed to list branches: {str(e)}")
 
-    @mcp.tool(tags={"github", "prs"})
-    def search_merged_prs(
-        author: Optional[str] = Field(
-            default=None,
-            description="GitHub username to filter by. Defaults to authenticated user if not specified.",
-        ),
-        days: int = Field(
-            default=180,
-            description="Number of days to look back (default: 180 for ~6 months)",
-        ),
-        org: Optional[str] = Field(
-            default=None,
-            description="GitHub org to search within. If not specified, searches all accessible repos.",
-        ),
-        repo: Optional[str] = Field(
-            default=None,
-            description="Specific repo in 'owner/repo' format (e.g., 'revolving-org/supabase'). Overrides org if specified.",
-        ),
-        limit: int = Field(
-            default=100,
-            description="Maximum PRs to return (default: 100)",
-        ),
-        detail_level: str = Field(
-            default="summary",
-            description="'summary' for minimal fields (number, title, merged_at, repo, owner, html_url, author), "
-            "'full' adds body and labels. Use 'summary' for large result sets.",
-        ),
-    ) -> dict:
-        """
-        Search for merged pull requests by author within a time period.
-
-        NOTE: For appraisals/performance reviews, use prepare_appraisal_data instead!
-        It fetches all PRs with full stats in parallel and avoids context overflow.
-
-        This tool returns basic PR info without stats (additions, deletions).
-        Use detail_level='summary' (default) for large result sets.
-
-        Requires QuickCall authentication with GitHub connected.
-        """
-        try:
-            client = _get_client()
-
-            # Calculate since_date from days
-            from datetime import datetime, timedelta, timezone
-
-            since_date = (datetime.now(timezone.utc) - timedelta(days=days)).strftime(
-                "%Y-%m-%d"
-            )
-
-            # Use authenticated user if author not specified
-            if not author:
-                creds = get_credential_store().get_api_credentials()
-                if creds and creds.github_username:
-                    author = creds.github_username
-
-            prs = client.search_merged_prs(
-                author=author,
-                since_date=since_date,
-                org=org,
-                repo=repo,
-                limit=limit,
-                detail_level=detail_level,
-            )
-
-            return {
-                "count": len(prs),
-                "detail_level": detail_level,
-                "period": f"Last {days} days",
-                "author": author,
-                "org": org,
-                "repo": repo,
-                "prs": prs,
-            }
-        except ToolError:
-            raise
-        except Exception as e:
-            raise ToolError(f"Failed to search merged PRs: {str(e)}")
-
     @mcp.tool(tags={"github", "prs", "appraisal"})
     def prepare_appraisal_data(
         author: Optional[str] = Field(

@@ -491,18 +409,17 @@ def create_github_tools(mcp: FastMCP) -> None:
         ),
     ) -> dict:
         """
-
-
-        USE THIS TOOL FOR APPRAISALS AND PERFORMANCE REVIEWS!
+        Fetch all merged PRs for appraisals/performance reviews.
 
-
-
-
-
-        4. Then use get_appraisal_pr_details(file_path, pr_numbers) for selected PRs
+        Returns:
+        - file_path: temp file with full PR data (additions, deletions, files)
+        - pr_titles: list of {number, title, repo} for Claude to review
+        - count: total PRs found
 
-
-
+        Workflow:
+        1. Call this tool → get file_path and pr_titles
+        2. Review pr_titles, pick significant PRs
+        3. Call get_appraisal_pr_details(file_path, [pr_numbers]) for full details
         """
         import json
         import tempfile

@@ -611,13 +528,12 @@ def create_github_tools(mcp: FastMCP) -> None:
         ),
     ) -> dict:
         """
-
+        Read full PR details from the appraisal data file.
 
-
-
+        Call this after prepare_appraisal_data with selected PR numbers.
+        Reads from the cached file - no API calls made.
 
-
-        that Claude has identified as important for the appraisal.
+        Returns: additions, deletions, files changed, body for selected PRs.
         """
         import json
 
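For orientation, a minimal sketch of the file handoff these docstrings describe: prepare_appraisal_data writes the full PR data to a temp file and returns only lightweight titles, and get_appraisal_pr_details filters that cached dump by PR number. The dump layout follows the docstrings above; the load_selected_prs helper and the sample PR values are illustrative, not the package's actual implementation.

    import json
    import tempfile

    # Shape of the handoff described by prepare_appraisal_data: full PR data goes to
    # a temp file, only titles are returned to the model.
    full_prs = [
        {"number": 42, "title": "feat: example", "repo": "org/repo",
         "additions": 120, "deletions": 30, "body": "..."},
    ]

    fd, file_path = tempfile.mkstemp(suffix=".json", prefix="appraisal_")
    with open(file_path, "w") as f:
        json.dump({"count": len(full_prs), "prs": full_prs}, f)

    pr_titles = [
        {"number": pr["number"], "title": pr["title"], "repo": pr["repo"]}
        for pr in full_prs
    ]

    # get_appraisal_pr_details then reads the cached dump and filters by the selected
    # numbers; no further GitHub API calls are needed at this step.
    def load_selected_prs(path, pr_numbers):  # hypothetical helper, for illustration
        with open(path) as f:
            data = json.load(f)
        wanted = set(pr_numbers)
        return [pr for pr in data.get("prs", []) if pr["number"] in wanted]

    print(load_selected_prs(file_path, [42]))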
{quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/plugins/quickcall/.claude-plugin/plugin.json
RENAMED

@@ -1,7 +1,7 @@
 {
   "name": "quickcall",
   "description": "Integrate quickcall into dev workflows - eliminate interruptions for developers. Ask about your work, get instant answers. No more context switching.",
-  "version": "0.6.
+  "version": "0.6.2",
   "author": {
     "name": "Sagar Sarkale"
   }
{quickcall_integrations-0.3.1 → quickcall_integrations-0.3.3}/plugins/quickcall/commands/appraisal.md
RENAMED

@@ -16,30 +16,36 @@ Parse `$ARGUMENTS` for time period:
 
 ## Instructions
 
+**Two-step flow (avoids context overflow):**
+
+1. `prepare_appraisal_data` → fetches all PRs, dumps to file, returns titles + PR numbers
+2. `get_appraisal_pr_details` → Claude picks which PRs to get details for, reads from file
+
+---
+
 1. **Gather contribution data:**
 
-   **Option A - GitHub API (preferred
-   -
-   -
-   -
-   -
+   **Option A - GitHub API (preferred):**
+   - Call `prepare_appraisal_data(days=X)` with the parsed time period
+   - This fetches ALL merged PRs with full stats in PARALLEL and dumps to a temp file
+   - Returns: `file_path` + `pr_titles` (number, title, repo for each PR)
+   - Optional: pass `org` or `repo` parameter to filter
 
-   **Option B - Local Git (fallback
+   **Option B - Local Git (fallback):**
    - Use `get_local_contributions` tool on the current directory
    - This parses local git history for commits by the user
-   - Extracts PR numbers from merge commit messages where available
 
-2. **Analyze and categorize
-
+2. **Analyze and categorize PRs from the titles:**
+   Review the `pr_titles` list returned in step 1 and categorize:
    - **Features**: New functionality (feat:, add:, implement, new, create)
    - **Enhancements**: Improvements (improve:, update:, perf:, optimize, enhance)
    - **Bug fixes**: (fix:, bugfix:, hotfix:, resolve, patch)
    - **Chores**: Maintenance work (docs:, test:, ci:, chore:, refactor:, bump)
 
-3. **
-   -
-   -
-   -
+3. **Get full details for selected PRs:**
+   - Based on the titles, pick the top 5-10 significant PRs worth highlighting
+   - Call `get_appraisal_pr_details(file_path, [pr_numbers])` with the selected PR numbers
+   - Returns: full details (additions, deletions, files, body) for those PRs only
 
 4. **Calculate summary metrics:**
    - Total PRs merged by category
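The categorization step in the updated command keys off conventional-commit-style prefixes in the PR titles. A rough sketch of that heuristic, using the keyword groups from the bullets above (the function name, bucket order, and fallback label are illustrative, not part of the package):

    def categorize_pr_title(title: str) -> str:
        # Keyword groups mirror the category bullets in the command instructions.
        t = title.lower()
        buckets = [
            ("Chores", ("docs:", "test:", "ci:", "chore:", "refactor:", "bump")),
            ("Bug fixes", ("fix:", "bugfix:", "hotfix:", "resolve", "patch")),
            ("Enhancements", ("improve:", "update:", "perf:", "optimize", "enhance")),
            ("Features", ("feat:", "add:", "implement", "new", "create")),
        ]
        for bucket, keywords in buckets:
            if any(k in t for k in keywords):
                return bucket
        return "Other"

    print(categorize_pr_title("feat: add login"))   # Features
    print(categorize_pr_title("fix: bug in auth"))  # Bug fixes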
quickcall_integrations-0.3.3/tests/test_appraisal_integration.py
ADDED

@@ -0,0 +1,235 @@
#!/usr/bin/env python3
"""
Integration test for appraisal tools.

Tests the actual MCP tool functions with real GitHub API calls.

Usage:
    uv run python tests/test_appraisal_integration.py
"""

import json
import os
import sys

# Add parent dir to path for imports
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


def test_prepare_appraisal_data_tool():
    """Test prepare_appraisal_data MCP tool with real API."""
    print("\n=== Integration Test: prepare_appraisal_data ===")

    from mcp_server.auth import get_github_pat, get_credential_store
    from mcp_server.api_clients.github_client import GitHubClient

    # Check auth
    pat_token, source = get_github_pat()
    store = get_credential_store()

    if not pat_token and not store.is_authenticated():
        print("⚠️ No GitHub auth available, skipping integration test")
        return None

    # Get client
    if pat_token:
        client = GitHubClient(token=pat_token)
        author = client.get_authenticated_user()
        print(f"✅ Using PAT ({source}), user: {author}")
    else:
        creds = store.get_api_credentials()
        client = GitHubClient(
            token=creds.github_token,
            default_owner=creds.github_username,
            installation_id=creds.github_installation_id,
        )
        author = creds.github_username
        print(f"✅ Using GitHub App, user: {author}")

    # Step 1: Search merged PRs
    print("\n[Step 1] Searching merged PRs (last 30 days, limit 10)...")
    from datetime import datetime, timedelta, timezone

    since_date = (datetime.now(timezone.utc) - timedelta(days=30)).strftime("%Y-%m-%d")

    pr_list = client.search_merged_prs(
        author=author,
        since_date=since_date,
        limit=10,
        detail_level="full",
    )

    if not pr_list:
        print("⚠️ No merged PRs found in last 30 days")
        return None

    print(f"✅ Found {len(pr_list)} PRs")
    for pr in pr_list[:3]:
        print(f"   - #{pr['number']}: {pr['title'][:50]}")

    # Step 2: Fetch full details in parallel
    print("\n[Step 2] Fetching full PR details in parallel...")
    pr_refs = [
        {"owner": pr["owner"], "repo": pr["repo"], "number": pr["number"]}
        for pr in pr_list
    ]

    full_prs = client.fetch_prs_parallel(pr_refs, max_workers=5)
    print(f"✅ Fetched {len(full_prs)} PRs with full details")

    if full_prs:
        pr = full_prs[0]
        print(
            f"   Sample: #{pr['number']} - +{pr.get('additions', 0)}/-{pr.get('deletions', 0)}"
        )

    # Step 3: Dump to file
    print("\n[Step 3] Dumping to file...")
    import tempfile

    dump_data = {
        "author": author,
        "period": "Last 30 days",
        "fetched_at": datetime.now(timezone.utc).isoformat(),
        "count": len(full_prs),
        "prs": full_prs,
    }

    fd, file_path = tempfile.mkstemp(suffix=".json", prefix="appraisal_integration_")
    with open(file_path, "w") as f:
        json.dump(dump_data, f, indent=2, default=str)

    file_size = os.path.getsize(file_path)
    print(f"✅ Dumped to: {file_path}")
    print(f"   Size: {file_size / 1024:.1f} KB")

    # Step 4: Generate titles (what tool returns)
    print("\n[Step 4] Generating PR titles for Claude...")
    pr_titles = [
        {
            "number": pr["number"],
            "title": pr["title"],
            "repo": f"{pr.get('owner', '')}/{pr.get('repo', '')}",
        }
        for pr in full_prs
    ]

    print(f"✅ Generated {len(pr_titles)} titles")
    for t in pr_titles[:5]:
        print(f"   - #{t['number']}: {t['title'][:50]}")

    # Step 5: Test get_appraisal_pr_details
    print("\n[Step 5] Testing get_appraisal_pr_details...")
    if len(full_prs) >= 2:
        selected_numbers = [full_prs[0]["number"], full_prs[1]["number"]]

        with open(file_path) as f:
            data = json.load(f)

        pr_numbers_set = set(selected_numbers)
        selected_prs = [
            pr for pr in data.get("prs", []) if pr["number"] in pr_numbers_set
        ]

        print(f"✅ Retrieved {len(selected_prs)} selected PRs from dump")
        for pr in selected_prs:
            print(
                f"   - #{pr['number']}: {pr['title'][:40]} (+{pr.get('additions', 0)}/-{pr.get('deletions', 0)})"
            )

    # Cleanup
    os.unlink(file_path)
    print("\n✅ Integration test passed!")
    return True


def test_response_size():
    """Test that response sizes are reasonable."""
    print("\n=== Test: Response sizes ===")

    # Simulate 100 PRs
    mock_pr_titles = [
        {
            "number": i,
            "title": f"PR title for #{i} - some description here",
            "repo": "org/repo",
        }
        for i in range(1, 101)
    ]

    # Calculate size of titles-only response
    titles_response = {
        "file_path": "/tmp/appraisal_xxx.json",
        "count": 100,
        "author": "testuser",
        "period": "Last 180 days",
        "pr_titles": mock_pr_titles,
        "next_step": "Call get_appraisal_pr_details...",
    }

    titles_size = len(json.dumps(titles_response))
    print(f"✅ Titles-only response for 100 PRs: {titles_size / 1024:.1f} KB")

    # Compare to full response (old way)
    mock_full_prs = [
        {
            "number": i,
            "title": f"PR title for #{i} - some description here",
            "body": "This is a longer description " * 10,
            "owner": "org",
            "repo": "repo",
            "additions": 100,
            "deletions": 50,
            "changed_files": 10,
            "labels": ["bug", "feature"],
            "merged_at": "2024-01-01T00:00:00Z",
            "html_url": f"https://github.com/org/repo/pull/{i}",
        }
        for i in range(1, 101)
    ]

    full_response = {
        "count": 100,
        "prs": mock_full_prs,
    }

    full_size = len(json.dumps(full_response))
    print(f"✅ Full response for 100 PRs: {full_size / 1024:.1f} KB")
    print(f"✅ Reduction: {(1 - titles_size / full_size) * 100:.0f}%")

    assert titles_size < full_size / 2, "Titles response should be <50% of full"
    print("✅ Test passed!\n")
    return True


if __name__ == "__main__":
    print("=" * 60)
    print("Appraisal Tools - Integration Tests")
    print("=" * 60)

    results = []

    # Test response sizes first (no auth needed)
    try:
        results.append(("Response sizes", test_response_size()))
    except Exception as e:
        print(f"❌ Failed: {e}")
        results.append(("Response sizes", False))

    # Test actual tool flow
    try:
        result = test_prepare_appraisal_data_tool()
        results.append(("prepare_appraisal_data", result))
    except Exception as e:
        print(f"❌ Failed: {e}")
        import traceback

        traceback.print_exc()
        results.append(("prepare_appraisal_data", False))

    print("\n" + "=" * 60)
    print("Results:")
    for name, passed in results:
        status = "✅ PASS" if passed else ("⚠️ SKIP" if passed is None else "❌ FAIL")
        print(f"   {name}: {status}")
    print("=" * 60)
quickcall_integrations-0.3.3/tests/test_appraisal_tools.py
ADDED

@@ -0,0 +1,416 @@
#!/usr/bin/env python3
"""
Test appraisal tools locally.

Tests:
1. prepare_appraisal_data - fetches PRs in parallel, dumps to file, returns titles
2. get_appraisal_pr_details - reads from dump file

Usage:
    uv run python tests/test_appraisal_tools.py
"""

import json
import os
import tempfile
from datetime import datetime, timezone
from unittest.mock import MagicMock, patch

# Test the core logic without MCP server


def test_prepare_appraisal_data_dumps_to_file():
    """Test that prepare_appraisal_data creates a file with PR data."""
    print("\n=== Test 1: prepare_appraisal_data dumps to file ===")

    # Mock data - simulating full PR details from parallel fetch
    mock_full_prs = [
        {
            "number": 1,
            "title": "feat: add login",
            "owner": "test-org",
            "repo": "test-repo",
            "additions": 100,
            "deletions": 20,
            "body": "Added login feature",
        },
        {
            "number": 2,
            "title": "fix: bug in auth",
            "owner": "test-org",
            "repo": "test-repo",
            "additions": 10,
            "deletions": 5,
            "body": "Fixed auth bug",
        },
        {
            "number": 3,
            "title": "chore: update deps",
            "owner": "test-org",
            "repo": "test-repo",
            "additions": 50,
            "deletions": 50,
            "body": "Updated dependencies",
        },
    ]

    # Simulate what prepare_appraisal_data does
    dump_data = {
        "author": "testuser",
        "period": "Last 180 days",
        "org": None,
        "repo": None,
        "fetched_at": datetime.now(timezone.utc).isoformat(),
        "count": len(mock_full_prs),
        "prs": mock_full_prs,
    }

    # Create temp file
    fd, file_path = tempfile.mkstemp(suffix=".json", prefix="appraisal_test_")
    with open(file_path, "w") as f:
        json.dump(dump_data, f, indent=2, default=str)

    # Verify file exists and has correct structure
    assert os.path.exists(file_path), "File should be created"

    with open(file_path) as f:
        loaded = json.load(f)

    assert loaded["count"] == 3, f"Expected 3 PRs, got {loaded['count']}"
    assert len(loaded["prs"]) == 3, "Should have 3 PRs in data"
    assert loaded["prs"][0]["title"] == "feat: add login", "First PR title should match"

    # Generate titles (what prepare_appraisal_data returns)
    pr_titles = [
        {
            "number": pr["number"],
            "title": pr["title"],
            "repo": f"{pr['owner']}/{pr['repo']}",
        }
        for pr in mock_full_prs
    ]

    result = {
        "file_path": file_path,
        "count": len(mock_full_prs),
        "author": "testuser",
        "period": "Last 180 days",
        "pr_titles": pr_titles,
        "next_step": "Call get_appraisal_pr_details...",
    }

    print(f"✅ File created: {file_path}")
    print(f"✅ PR count: {result['count']}")
    print(f"✅ PR titles returned: {len(result['pr_titles'])}")
    for t in result["pr_titles"]:
        print(f"   - #{t['number']}: {t['title']}")

    # Cleanup
    os.unlink(file_path)
    print("✅ Test passed!\n")
    return True


def test_get_appraisal_pr_details_reads_from_file():
    """Test that get_appraisal_pr_details reads selected PRs from dump."""
    print("\n=== Test 2: get_appraisal_pr_details reads from file ===")

    # Create a dump file
    mock_prs = [
        {
            "number": 1,
            "title": "feat: add login",
            "owner": "test-org",
            "repo": "test-repo",
            "additions": 100,
            "deletions": 20,
            "body": "Added login feature",
        },
        {
            "number": 2,
            "title": "fix: bug in auth",
            "owner": "test-org",
            "repo": "test-repo",
            "additions": 10,
            "deletions": 5,
            "body": "Fixed auth bug",
        },
        {
            "number": 3,
            "title": "chore: update deps",
            "owner": "test-org",
            "repo": "test-repo",
            "additions": 50,
            "deletions": 50,
            "body": "Updated dependencies",
        },
        {
            "number": 4,
            "title": "feat: add logout",
            "owner": "test-org",
            "repo": "test-repo",
            "additions": 80,
            "deletions": 10,
            "body": "Added logout",
        },
        {
            "number": 5,
            "title": "docs: update readme",
            "owner": "test-org",
            "repo": "test-repo",
            "additions": 20,
            "deletions": 5,
            "body": "Updated docs",
        },
    ]

    dump_data = {
        "author": "testuser",
        "period": "Last 180 days",
        "prs": mock_prs,
    }

    fd, file_path = tempfile.mkstemp(suffix=".json", prefix="appraisal_test_")
    with open(file_path, "w") as f:
        json.dump(dump_data, f)

    # Simulate get_appraisal_pr_details - select only PRs 1 and 4
    selected_numbers = [1, 4]
    pr_numbers_set = set(selected_numbers)

    with open(file_path) as f:
        data = json.load(f)

    selected_prs = [pr for pr in data.get("prs", []) if pr["number"] in pr_numbers_set]

    result = {
        "count": len(selected_prs),
        "requested": len(selected_numbers),
        "prs": selected_prs,
    }

    assert result["count"] == 2, f"Expected 2 PRs, got {result['count']}"
    assert result["prs"][0]["number"] == 1, "First PR should be #1"
    assert result["prs"][1]["number"] == 4, "Second PR should be #4"
    assert result["prs"][0]["additions"] == 100, "Should have full PR details"

    print(f"✅ Requested PRs: {selected_numbers}")
    print(f"✅ Retrieved {result['count']} PRs from dump")
    for pr in result["prs"]:
        print(
            f"   - #{pr['number']}: {pr['title']} (+{pr['additions']}/-{pr['deletions']})"
        )

    # Cleanup
    os.unlink(file_path)
    print("✅ Test passed!\n")
    return True


def test_parallel_fetch_simulation():
    """Test that parallel fetching logic works."""
    print("\n=== Test 3: Parallel fetch simulation ===")

    from concurrent.futures import ThreadPoolExecutor, as_completed
    import time

    # Simulate fetching PR details
    def fetch_pr(pr_ref):
        # Simulate API latency
        time.sleep(0.1)
        return {
            "number": pr_ref["number"],
            "title": f"PR #{pr_ref['number']}",
            "owner": pr_ref["owner"],
            "repo": pr_ref["repo"],
            "additions": pr_ref["number"] * 10,
            "deletions": pr_ref["number"] * 2,
        }

    pr_refs = [
        {"owner": "test", "repo": "repo", "number": i}
        for i in range(1, 11)  # 10 PRs
    ]

    start = time.time()

    # Sequential (for comparison)
    # sequential_results = [fetch_pr(ref) for ref in pr_refs]
    # sequential_time = time.time() - start

    # Parallel
    start = time.time()
    results = []
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = {executor.submit(fetch_pr, ref): ref for ref in pr_refs}
        for future in as_completed(futures):
            result = future.result()
            if result:
                results.append(result)

    parallel_time = time.time() - start

    assert len(results) == 10, f"Expected 10 results, got {len(results)}"
    print(f"✅ Fetched {len(results)} PRs in parallel")
    print(f"✅ Time: {parallel_time:.2f}s (vs ~1s sequential)")
    print("✅ Test passed!\n")
    return True


def test_full_flow_with_mock_client():
    """Test the full appraisal flow with mocked GitHub client."""
    print("\n=== Test 4: Full flow with mock client ===")

    from mcp_server.api_clients.github_client import GitHubClient

    # Create a mock client
    with patch.object(GitHubClient, "__init__", lambda self, **kwargs: None):
        client = GitHubClient(token="fake")
        client.token = "fake"
        client._is_pat_mode = True
        client.default_owner = "testuser"

    # Mock search_merged_prs
    mock_search_results = [
        {
            "number": 1,
            "title": "feat: add feature",
            "owner": "org",
            "repo": "repo",
            "merged_at": "2024-01-01",
            "body": "",
            "labels": [],
        },
        {
            "number": 2,
            "title": "fix: bug fix",
            "owner": "org",
            "repo": "repo",
            "merged_at": "2024-01-02",
            "body": "",
            "labels": [],
        },
    ]

    # Mock get_pr to return full details
    class MockPR:
        def __init__(self, num):
            self.number = num
            self.title = f"PR #{num}"
            self.body = f"Body for PR #{num}"
            self.state = "closed"
            self.additions = num * 100
            self.deletions = num * 10
            self.changed_files = num * 5
            self.commits = num
            self.draft = False
            self.mergeable = True
            self.labels = []
            self.reviewers = []
            self.created_at = datetime.now()
            self.updated_at = datetime.now()
            self.merged_at = datetime.now()
            self.html_url = f"https://github.com/org/repo/pull/{num}"
            self.head_branch = "feature"
            self.base_branch = "main"
            self.user = MagicMock(login="testuser")

        def model_dump(self):
            return {
                "number": self.number,
                "title": self.title,
                "body": self.body,
                "additions": self.additions,
                "deletions": self.deletions,
            }

    client.search_merged_prs = MagicMock(return_value=mock_search_results)
    client.get_pr = MagicMock(side_effect=lambda num, **kw: MockPR(num))

    # Test search
    search_result = client.search_merged_prs(
        author="testuser", since_date="2024-01-01"
    )
    assert len(search_result) == 2, "Should get 2 PRs from search"
    print(f"✅ Search returned {len(search_result)} PRs")

    # Test get_pr
    pr = client.get_pr(1)
    assert pr.number == 1, "Should get PR #1"
    assert pr.additions == 100, "Should have additions"
    print(
        f"✅ get_pr returned PR #{pr.number} with +{pr.additions}/-{pr.deletions}"
    )

    print("✅ Test passed!\n")
    return True


def test_actual_github_client():
    """Test actual GitHub client if credentials available."""
    print("\n=== Test 5: Actual GitHub client (optional) ===")

    from mcp_server.auth import get_github_pat

    pat_token, source = get_github_pat()
    if not pat_token:
        print("⚠️ No GitHub PAT found, skipping live test")
        print("   Set GITHUB_TOKEN env var to enable this test")
        return True

    from mcp_server.api_clients.github_client import GitHubClient

    client = GitHubClient(token=pat_token)

    # Test search (limited)
    print(f"✅ Using PAT from {source}")

    try:
        prs = client.search_merged_prs(
            author=client.get_authenticated_user(),
            since_date="2024-12-01",
            limit=5,
            detail_level="summary",
        )
        print(f"✅ Found {len(prs)} merged PRs in last month")
        for pr in prs[:3]:
            print(f"   - #{pr['number']}: {pr['title'][:50]}")
    except Exception as e:
        print(f"⚠️ Search failed: {e}")

    print("✅ Test passed!\n")
    return True


if __name__ == "__main__":
    print("=" * 60)
    print("Testing Appraisal Tools")
    print("=" * 60)

    tests = [
        test_prepare_appraisal_data_dumps_to_file,
        test_get_appraisal_pr_details_reads_from_file,
        test_parallel_fetch_simulation,
        test_full_flow_with_mock_client,
        test_actual_github_client,
    ]

    passed = 0
    failed = 0

    for test in tests:
        try:
            if test():
                passed += 1
            else:
                failed += 1
        except Exception as e:
            print(f"❌ Test failed with exception: {e}")
            import traceback

            traceback.print_exc()
            failed += 1

    print("=" * 60)
    print(f"Results: {passed} passed, {failed} failed")
    print("=" * 60)
Files renamed from quickcall_integrations-0.3.1 to quickcall_integrations-0.3.3 without content changes:

- .claude-plugin/marketplace.json
- .github/workflows/publish-pypi.yml
- .gitignore
- .pre-commit-config.yaml
- Dockerfile
- README.md
- assets/logo.png
- mcp_server/__init__.py
- mcp_server/api_clients/__init__.py
- mcp_server/api_clients/github_client.py
- mcp_server/api_clients/slack_client.py
- mcp_server/auth/__init__.py
- mcp_server/auth/credentials.py
- mcp_server/auth/device_flow.py
- mcp_server/resources/__init__.py
- mcp_server/resources/slack_resources.py
- mcp_server/server.py
- mcp_server/tools/__init__.py
- mcp_server/tools/auth_tools.py
- mcp_server/tools/git_tools.py
- mcp_server/tools/slack_tools.py
- mcp_server/tools/utility_tools.py
- plugins/quickcall/commands/connect-github-pat.md
- plugins/quickcall/commands/connect.md
- plugins/quickcall/commands/slack-summary.md
- plugins/quickcall/commands/status.md
- plugins/quickcall/commands/updates.md
- requirements.txt
- tests/README.md
- tests/appraisal/__init__.py
- tests/appraisal/setup_test_data.py
- tests/test_integrations.py
- tests/test_tools.py
- uv.lock