linear-mcp-fast 0.3.1__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- linear_mcp_fast/reader.py +217 -27
- linear_mcp_fast/server.py +294 -60
- linear_mcp_fast/store_detector.py +87 -1
- {linear_mcp_fast-0.3.1.dist-info → linear_mcp_fast-0.4.0.dist-info}/METADATA +11 -1
- {linear_mcp_fast-0.3.1.dist-info → linear_mcp_fast-0.4.0.dist-info}/RECORD +8 -8
- {linear_mcp_fast-0.3.1.dist-info → linear_mcp_fast-0.4.0.dist-info}/WHEEL +0 -0
- {linear_mcp_fast-0.3.1.dist-info → linear_mcp_fast-0.4.0.dist-info}/entry_points.txt +0 -0
- {linear_mcp_fast-0.3.1.dist-info → linear_mcp_fast-0.4.0.dist-info}/top_level.txt +0 -0
linear_mcp_fast/reader.py
CHANGED
@@ -39,6 +39,13 @@ class CachedData:
     comments_by_issue: dict[str, list[str]] = field(default_factory=dict)
     projects: dict[str, dict[str, Any]] = field(default_factory=dict)
     issue_content: dict[str, str] = field(default_factory=dict)  # issueId -> description
+    labels: dict[str, dict[str, Any]] = field(default_factory=dict)
+    initiatives: dict[str, dict[str, Any]] = field(default_factory=dict)
+    cycles: dict[str, dict[str, Any]] = field(default_factory=dict)
+    documents: dict[str, dict[str, Any]] = field(default_factory=dict)
+    document_content: dict[str, dict[str, Any]] = field(default_factory=dict)
+    milestones: dict[str, dict[str, Any]] = field(default_factory=dict)
+    project_updates: dict[str, dict[str, Any]] = field(default_factory=dict)
     loaded_at: float = 0.0

     def is_expired(self) -> bool:
@@ -60,7 +67,6 @@ class LinearLocalReader:
         self._db_path = db_path
         self._blob_path = blob_path
         self._cache = CachedData()
-        self._stores: DetectedStores | None = None

     def _check_db_exists(self) -> None:
         """Verify the Linear database exists."""
@@ -75,17 +81,20 @@ class LinearLocalReader:
         self._check_db_exists()
         return ccl_chromium_indexeddb.WrappedIndexDB(self._db_path, self._blob_path)

-    def
+    def _find_all_linear_dbs(
         self, wrapper: ccl_chromium_indexeddb.WrappedIndexDB
-    ) -> ccl_chromium_indexeddb.WrappedDatabase:
-        """Find
+    ) -> list[ccl_chromium_indexeddb.WrappedDatabase]:
+        """Find all Linear databases with data."""
+        databases = []
         for db_id in wrapper.database_ids:
             if "linear_" in db_id.name and db_id.name != "linear_databases":
                 db = wrapper[db_id.name, db_id.origin]
                 # Skip empty databases
                 if list(db.object_store_names):
-
-
+                    databases.append(db)
+        if not databases:
+            raise ValueError("Could not find Linear database in IndexedDB")
+        return databases

     def _to_str(self, val: Any) -> str:
         """Convert value to string, handling bytes."""
@@ -207,19 +216,30 @@ class LinearLocalReader:
             pass

     def _reload_cache(self) -> None:
-        """Reload all data from
+        """Reload all data from all Linear IndexedDB databases."""
         wrapper = self._get_wrapper()
-
-
-        # Detect stores if not already done
-        if self._stores is None:
-            self._stores = detect_stores(db)
+        databases = self._find_all_linear_dbs(wrapper)

         cache = CachedData(loaded_at=time.time())

+        # Load from all databases
+        for db in databases:
+            stores = detect_stores(db)
+            self._load_from_db(db, stores, cache)
+
+        self._cache = cache
+
+    def _load_from_db(
+        self,
+        db: ccl_chromium_indexeddb.WrappedDatabase,
+        stores: DetectedStores,
+        cache: CachedData,
+    ) -> None:
+        """Load data from a single database into the cache."""
+
         # Load teams
-        if
-        for val in self._load_from_store(db,
+        if stores.teams:
+            for val in self._load_from_store(db, stores.teams):
                 cache.teams[val["id"]] = {
                     "id": val["id"],
                     "key": val.get("key"),
@@ -227,8 +247,8 @@ class LinearLocalReader:
                 }

         # Load users from all detected user stores
-        if
-        for store_name in
+        if stores.users:
+            for store_name in stores.users:
                 for val in self._load_from_store(db, store_name):
                     if val.get("id") not in cache.users:
                         cache.users[val["id"]] = {
@@ -239,8 +259,8 @@ class LinearLocalReader:
                         }

         # Load workflow states from all detected state stores
-        if
-        for store_name in
+        if stores.workflow_states:
+            for store_name in stores.workflow_states:
                 for val in self._load_from_store(db, store_name):
                     if val.get("id") not in cache.states:
                         cache.states[val["id"]] = {
@@ -253,8 +273,8 @@ class LinearLocalReader:
                         }

         # Load issues
-        if
-        for val in self._load_from_store(db,
+        if stores.issues:
+            for val in self._load_from_store(db, stores.issues):
                 team = cache.teams.get(val.get("teamId"), {})
                 team_key = team.get("key", "???")
                 identifier = f"{team_key}-{val.get('number')}"
@@ -283,8 +303,8 @@ class LinearLocalReader:
                 }

         # Load comments
-        if
-        for val in self._load_from_store(db,
+        if stores.comments:
+            for val in self._load_from_store(db, stores.comments):
                 comment_id = val.get("id")
                 issue_id = val.get("issueId")
                 if not comment_id or not issue_id:
@@ -304,8 +324,8 @@ class LinearLocalReader:
                 cache.comments_by_issue[issue_id].append(comment_id)

         # Load projects
-        if
-        for val in self._load_from_store(db,
+        if stores.projects:
+            for val in self._load_from_store(db, stores.projects):
                 cache.projects[val["id"]] = {
                     "id": val["id"],
                     "name": val.get("name"),
@@ -326,8 +346,8 @@ class LinearLocalReader:
                 }

         # Load issue content (Y.js encoded descriptions)
-        if
-        for val in self._load_from_store(db,
+        if stores.issue_content:
+            for val in self._load_from_store(db, stores.issue_content):
                 issue_id = val.get("issueId")
                 content_state = val.get("contentState")
                 if issue_id and content_state:
@@ -340,7 +360,101 @@ class LinearLocalReader:
                     if issue_id in cache.issues and not cache.issues[issue_id].get("description"):
                         cache.issues[issue_id]["description"] = desc

-
+        # Load labels from all detected label stores
+        if stores.labels:
+            for store_name in stores.labels:
+                for val in self._load_from_store(db, store_name):
+                    if val.get("id") not in cache.labels:
+                        cache.labels[val["id"]] = {
+                            "id": val["id"],
+                            "name": val.get("name"),
+                            "color": val.get("color"),
+                            "isGroup": val.get("isGroup"),
+                            "parentId": val.get("parentId"),
+                            "teamId": val.get("teamId"),
+                        }
+
+        # Load initiatives
+        if stores.initiatives:
+            for val in self._load_from_store(db, stores.initiatives):
+                cache.initiatives[val["id"]] = {
+                    "id": val["id"],
+                    "name": val.get("name"),
+                    "slugId": val.get("slugId"),
+                    "color": val.get("color"),
+                    "status": val.get("status"),
+                    "ownerId": val.get("ownerId"),
+                    "teamIds": val.get("teamIds", []),
+                    "createdAt": val.get("createdAt"),
+                    "updatedAt": val.get("updatedAt"),
+                }
+
+        # Load cycles
+        if stores.cycles:
+            for val in self._load_from_store(db, stores.cycles):
+                cache.cycles[val["id"]] = {
+                    "id": val["id"],
+                    "number": val.get("number"),
+                    "teamId": val.get("teamId"),
+                    "startsAt": val.get("startsAt"),
+                    "endsAt": val.get("endsAt"),
+                    "completedAt": val.get("completedAt"),
+                    "currentProgress": val.get("currentProgress"),
+                }
+
+        # Load documents
+        if stores.documents:
+            for val in self._load_from_store(db, stores.documents):
+                doc_id = val.get("id")
+                # Documents may have multiple versions, keep the latest by updatedAt
+                existing = cache.documents.get(doc_id)
+                if existing and existing.get("updatedAt", "") >= val.get("updatedAt", ""):
+                    continue
+                cache.documents[doc_id] = {
+                    "id": doc_id,
+                    "title": val.get("title"),
+                    "slugId": val.get("slugId"),
+                    "projectId": val.get("projectId"),
+                    "creatorId": val.get("creatorId"),
+                    "createdAt": val.get("createdAt"),
+                    "updatedAt": val.get("updatedAt"),
+                }
+
+        # Load document content
+        if stores.document_content:
+            for val in self._load_from_store(db, stores.document_content):
+                content_id = val.get("documentContentId")
+                if content_id:
+                    cache.document_content[content_id] = {
+                        "id": val.get("id"),
+                        "documentContentId": content_id,
+                        "contentData": val.get("contentData"),
+                    }
+
+        # Load milestones
+        if stores.milestones:
+            for val in self._load_from_store(db, stores.milestones):
+                cache.milestones[val["id"]] = {
+                    "id": val["id"],
+                    "name": val.get("name"),
+                    "projectId": val.get("projectId"),
+                    "targetDate": val.get("targetDate"),
+                    "sortOrder": val.get("sortOrder"),
+                    "currentProgress": val.get("currentProgress"),
+                }
+
+        # Load project updates
+        if stores.project_updates:
+            for val in self._load_from_store(db, stores.project_updates):
+                cache.project_updates[val["id"]] = {
+                    "id": val["id"],
+                    "body": val.get("body"),
+                    "health": val.get("health"),
+                    "projectId": val.get("projectId"),
+                    "userId": val.get("userId"),
+                    "createdAt": val.get("createdAt"),
+                    "updatedAt": val.get("updatedAt"),
+                }

     def _ensure_cache(self) -> CachedData:
         """Ensure the cache is loaded and not expired."""
@@ -378,6 +492,36 @@ class LinearLocalReader:
         """Get all projects."""
         return self._ensure_cache().projects

+    @property
+    def labels(self) -> dict[str, dict[str, Any]]:
+        """Get all labels."""
+        return self._ensure_cache().labels
+
+    @property
+    def initiatives(self) -> dict[str, dict[str, Any]]:
+        """Get all initiatives."""
+        return self._ensure_cache().initiatives
+
+    @property
+    def cycles(self) -> dict[str, dict[str, Any]]:
+        """Get all cycles."""
+        return self._ensure_cache().cycles
+
+    @property
+    def documents(self) -> dict[str, dict[str, Any]]:
+        """Get all documents."""
+        return self._ensure_cache().documents
+
+    @property
+    def milestones(self) -> dict[str, dict[str, Any]]:
+        """Get all milestones."""
+        return self._ensure_cache().milestones
+
+    @property
+    def project_updates(self) -> dict[str, dict[str, Any]]:
+        """Get all project updates."""
+        return self._ensure_cache().project_updates
+
     def get_comments_for_issue(self, issue_id: str) -> list[dict[str, Any]]:
         """Get all comments for an issue, sorted by creation time."""
         cache = self._ensure_cache()
@@ -530,3 +674,49 @@ class LinearLocalReader:
             return ""
         project = self.projects.get(project_id, {})
         return project.get("name", "")
+
+    def get_label_name(self, label_id: str | None) -> str:
+        """Get label name from label ID."""
+        if not label_id:
+            return ""
+        label = self.labels.get(label_id, {})
+        return label.get("name", "")
+
+    def get_cycles_for_team(self, team_id: str) -> list[dict[str, Any]]:
+        """Get all cycles for a team, sorted by number descending."""
+        cycles = [c for c in self.cycles.values() if c.get("teamId") == team_id]
+        return sorted(cycles, key=lambda c: c.get("number", 0), reverse=True)
+
+    def get_documents_for_project(self, project_id: str) -> list[dict[str, Any]]:
+        """Get all documents for a project."""
+        return [d for d in self.documents.values() if d.get("projectId") == project_id]
+
+    def get_milestones_for_project(self, project_id: str) -> list[dict[str, Any]]:
+        """Get all milestones for a project, sorted by sortOrder."""
+        milestones = [m for m in self.milestones.values() if m.get("projectId") == project_id]
+        return sorted(milestones, key=lambda m: m.get("sortOrder", 0))
+
+    def get_updates_for_project(self, project_id: str) -> list[dict[str, Any]]:
+        """Get all updates for a project, sorted by creation time descending."""
+        updates = [u for u in self.project_updates.values() if u.get("projectId") == project_id]
+        return sorted(updates, key=lambda u: u.get("createdAt", ""), reverse=True)
+
+    def find_initiative(self, search: str) -> dict[str, Any] | None:
+        """Find an initiative by name or slugId (case-insensitive partial match)."""
+        search_lower = search.lower()
+        for initiative in self.initiatives.values():
+            name = self._to_str(initiative.get("name", ""))
+            slug_id = self._to_str(initiative.get("slugId", ""))
+            if search_lower in name.lower() or search_lower == slug_id.lower():
+                return initiative
+        return None
+
+    def find_document(self, search: str) -> dict[str, Any] | None:
+        """Find a document by title or slugId (case-insensitive partial match)."""
+        search_lower = search.lower()
+        for doc in self.documents.values():
+            title = self._to_str(doc.get("title", ""))
+            slug_id = self._to_str(doc.get("slugId", ""))
+            if search_lower in title.lower() or search_lower == slug_id.lower():
+                return doc
+        return None
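The merge policy across workspaces is the key behavioural change above: `_load_from_db` fills each per-entity cache only when an id is not already present, except for documents, where the copy with the newest `updatedAt` wins. A minimal standalone sketch of that document rule, using hypothetical sample records rather than real IndexedDB rows:

```python
# Sketch of the cross-database merge policy shown in _load_from_db above.
# The sample records are hypothetical; real values come from Linear's IndexedDB.

def merge_documents(cache: dict[str, dict], records: list[dict]) -> None:
    """Keep the newest copy of each document, comparing updatedAt ISO strings."""
    for val in records:
        doc_id = val.get("id")
        existing = cache.get(doc_id)
        if existing and existing.get("updatedAt", "") >= val.get("updatedAt", ""):
            continue  # an equal-or-newer copy is already cached
        cache[doc_id] = val


docs: dict[str, dict] = {}
workspace_a = [{"id": "d1", "title": "Spec", "updatedAt": "2024-01-01T00:00:00Z"}]
workspace_b = [{"id": "d1", "title": "Spec v2", "updatedAt": "2024-02-01T00:00:00Z"}]
for records in (workspace_a, workspace_b):
    merge_documents(docs, records)
print(docs["d1"]["title"])  # -> Spec v2
```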
linear_mcp_fast/server.py
CHANGED
@@ -150,66 +150,6 @@ def get_issue(identifier: str) -> dict[str, Any] | None:
     }


-@mcp.tool()
-def list_my_issues(
-    name: str,
-    state_type: str | None = None,
-    limit: int | None = None,
-) -> dict[str, Any]:
-    """
-    List issues assigned to a user.
-
-    Args:
-        name: User name to search for
-        state_type: Optional filter (started, unstarted, completed, canceled, backlog)
-        limit: Maximum issues (default: all)
-
-    Returns:
-        User info with their issues
-    """
-    reader = get_reader()
-
-    user = reader.find_user(name)
-    if not user:
-        return {"error": f"User '{name}' not found"}
-
-    all_issues = sorted(
-        reader.get_issues_for_user(user["id"]),
-        key=lambda x: (x.get("priority") or 4, x.get("id", "")),
-    )
-
-    counts_by_state: dict[str, int] = {}
-    for issue in all_issues:
-        issue_state_type = reader.get_state_type(issue.get("stateId", ""))
-        counts_by_state[issue_state_type] = counts_by_state.get(issue_state_type, 0) + 1
-
-    if state_type:
-        all_issues = [
-            i for i in all_issues
-            if reader.get_state_type(i.get("stateId", "")) == state_type
-        ]
-
-    page = all_issues[:limit] if limit else all_issues
-
-    results = []
-    for issue in page:
-        results.append({
-            "identifier": issue.get("identifier"),
-            "title": issue.get("title"),
-            "priority": issue.get("priority"),
-            "state": reader.get_state_name(issue.get("stateId", "")),
-            "stateType": reader.get_state_type(issue.get("stateId", "")),
-            "dueDate": issue.get("dueDate"),
-        })
-
-    return {
-        "user": {"name": user.get("name"), "email": user.get("email")},
-        "totalIssues": sum(counts_by_state.values()),
-        "countsByState": counts_by_state,
-        "issues": results,
-    }
-
-
 @mcp.tool()
 def list_teams() -> list[dict[str, Any]]:
     """
@@ -456,6 +396,300 @@ def list_issue_statuses(team: str) -> list[dict[str, Any]]:
     return results


+@mcp.tool()
+def list_comments(issue_id: str) -> list[dict[str, Any]]:
+    """
+    List comments for a specific issue.
+
+    Args:
+        issue_id: Issue identifier (e.g., 'UK-55')
+
+    Returns:
+        List of comments with author info
+    """
+    reader = get_reader()
+    issue = reader.get_issue_by_identifier(issue_id)
+
+    if not issue:
+        return []
+
+    comments = reader.get_comments_for_issue(issue["id"])
+    results = []
+    for comment in comments:
+        user = reader.users.get(comment.get("userId", ""), {})
+        results.append({
+            "id": comment.get("id"),
+            "author": user.get("name", "Unknown"),
+            "body": comment.get("body", ""),
+            "createdAt": comment.get("createdAt"),
+            "updatedAt": comment.get("updatedAt"),
+        })
+
+    return results
+
+
+@mcp.tool()
+def list_issue_labels(team: str | None = None) -> list[dict[str, Any]]:
+    """
+    List available issue labels.
+
+    Args:
+        team: Optional team key to filter team-specific labels
+
+    Returns:
+        List of labels
+    """
+    reader = get_reader()
+
+    team_id = None
+    if team:
+        team_obj = reader.find_team(team)
+        if team_obj:
+            team_id = team_obj["id"]
+
+    results = []
+    for label in reader.labels.values():
+        # Include workspace labels (no teamId) and team-specific labels
+        if team_id and label.get("teamId") and label.get("teamId") != team_id:
+            continue
+        results.append({
+            "id": label.get("id"),
+            "name": label.get("name"),
+            "color": label.get("color"),
+            "isGroup": label.get("isGroup"),
+        })
+
+    results.sort(key=lambda x: x.get("name", "") or "")
+    return results
+
+
+@mcp.tool()
+def list_initiatives() -> list[dict[str, Any]]:
+    """
+    List all initiatives.
+
+    Returns:
+        List of initiatives
+    """
+    reader = get_reader()
+    results = []
+
+    for initiative in reader.initiatives.values():
+        results.append({
+            "id": initiative.get("id"),
+            "name": initiative.get("name"),
+            "slugId": initiative.get("slugId"),
+            "color": initiative.get("color"),
+            "status": initiative.get("status"),
+            "owner": reader.get_user_name(initiative.get("ownerId")),
+        })
+
+    results.sort(key=lambda x: x.get("name", "") or "")
+    return results
+
+
+@mcp.tool()
+def get_initiative(name: str) -> dict[str, Any] | None:
+    """
+    Get initiative details by name.
+
+    Args:
+        name: Initiative name (partial match)
+
+    Returns:
+        Initiative details or None if not found
+    """
+    reader = get_reader()
+    initiative = reader.find_initiative(name)
+
+    if not initiative:
+        return None
+
+    return {
+        "id": initiative.get("id"),
+        "name": initiative.get("name"),
+        "slugId": initiative.get("slugId"),
+        "color": initiative.get("color"),
+        "status": initiative.get("status"),
+        "owner": reader.get_user_name(initiative.get("ownerId")),
+        "teamIds": initiative.get("teamIds", []),
+        "createdAt": initiative.get("createdAt"),
+        "updatedAt": initiative.get("updatedAt"),
+    }
+
+
+@mcp.tool()
+def list_cycles(team: str) -> list[dict[str, Any]]:
+    """
+    List cycles for a team.
+
+    Args:
+        team: Team key (e.g., 'UK')
+
+    Returns:
+        List of cycles sorted by number (newest first)
+    """
+    reader = get_reader()
+
+    team_obj = reader.find_team(team)
+    if not team_obj:
+        return []
+
+    cycles = reader.get_cycles_for_team(team_obj["id"])
+    results = []
+    for cycle in cycles:
+        progress = cycle.get("currentProgress", {})
+        results.append({
+            "id": cycle.get("id"),
+            "number": cycle.get("number"),
+            "startsAt": cycle.get("startsAt"),
+            "endsAt": cycle.get("endsAt"),
+            "completedAt": cycle.get("completedAt"),
+            "progress": {
+                "completed": progress.get("completedIssueCount", 0),
+                "started": progress.get("startedIssueCount", 0),
+                "unstarted": progress.get("unstartedIssueCount", 0),
+                "total": progress.get("scopeCount", 0),
+            } if progress else None,
+        })
+
+    return results
+
+
+@mcp.tool()
+def list_documents(project: str | None = None) -> list[dict[str, Any]]:
+    """
+    List documents, optionally filtered by project.
+
+    Args:
+        project: Optional project name to filter
+
+    Returns:
+        List of documents
+    """
+    reader = get_reader()
+
+    project_id = None
+    if project:
+        project_obj = reader.find_project(project)
+        if project_obj:
+            project_id = project_obj["id"]
+        else:
+            return []
+
+    results = []
+    for doc in reader.documents.values():
+        if project_id and doc.get("projectId") != project_id:
+            continue
+        results.append({
+            "id": doc.get("id"),
+            "title": doc.get("title"),
+            "slugId": doc.get("slugId"),
+            "project": reader.get_project_name(doc.get("projectId")),
+            "createdAt": doc.get("createdAt"),
+            "updatedAt": doc.get("updatedAt"),
+        })

+    results.sort(key=lambda x: x.get("updatedAt", "") or "", reverse=True)
+    return results
+
+
+@mcp.tool()
+def get_document(name: str) -> dict[str, Any] | None:
+    """
+    Get document details by title.
+
+    Args:
+        name: Document title (partial match)
+
+    Returns:
+        Document details or None if not found
+    """
+    reader = get_reader()
+    doc = reader.find_document(name)
+
+    if not doc:
+        return None
+
+    return {
+        "id": doc.get("id"),
+        "title": doc.get("title"),
+        "slugId": doc.get("slugId"),
+        "project": reader.get_project_name(doc.get("projectId")),
+        "creator": reader.get_user_name(doc.get("creatorId")),
+        "createdAt": doc.get("createdAt"),
+        "updatedAt": doc.get("updatedAt"),
+        "url": f"https://linear.app/document/{doc.get('slugId')}",
+    }
+
+
+@mcp.tool()
+def list_milestones(project: str) -> list[dict[str, Any]]:
+    """
+    List milestones for a project.
+
+    Args:
+        project: Project name
+
+    Returns:
+        List of milestones sorted by order
+    """
+    reader = get_reader()
+
+    project_obj = reader.find_project(project)
+    if not project_obj:
+        return []
+
+    milestones = reader.get_milestones_for_project(project_obj["id"])
+    results = []
+    for milestone in milestones:
+        progress = milestone.get("currentProgress", {})
+        results.append({
+            "id": milestone.get("id"),
+            "name": milestone.get("name"),
+            "targetDate": milestone.get("targetDate"),
+            "progress": {
+                "completed": progress.get("completedIssueCount", 0),
+                "started": progress.get("startedIssueCount", 0),
+                "unstarted": progress.get("unstartedIssueCount", 0),
+                "total": progress.get("scopeCount", 0),
+            } if progress else None,
+        })
+
+    return results
+
+
+@mcp.tool()
+def list_project_updates(project: str) -> list[dict[str, Any]]:
+    """
+    List updates for a project.
+
+    Args:
+        project: Project name
+
+    Returns:
+        List of project updates sorted by date (newest first)
+    """
+    reader = get_reader()
+
+    project_obj = reader.find_project(project)
+    if not project_obj:
+        return []
+
+    updates = reader.get_updates_for_project(project_obj["id"])
+    results = []
+    for update in updates:
+        results.append({
+            "id": update.get("id"),
+            "body": update.get("body"),
+            "health": update.get("health"),
+            "author": reader.get_user_name(update.get("userId")),
+            "createdAt": update.get("createdAt"),
+        })
+
+    return results
+
+
 def main():
     """Run the MCP server."""
     mcp.run()
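Every new endpoint above follows the same read-only shape: a function decorated with `@mcp.tool()` resolves a human-friendly argument (team key, project name, issue identifier) through the reader and returns plain dicts. A minimal sketch of that pattern, assuming the FastMCP helper from the MCP Python SDK (which the `@mcp.tool()` decorator and `mcp.run()` call in this diff suggest); the `echo_label` tool below is hypothetical and not part of the package:

```python
# Minimal sketch of the read-only tool pattern used in server.py.
# Assumption: FastMCP from the MCP Python SDK; the tool itself is hypothetical.
from typing import Any

from mcp.server.fastmcp import FastMCP

mcp = FastMCP("example-read-only-server")


@mcp.tool()
def echo_label(name: str) -> dict[str, Any]:
    """Return a label-shaped dict for the given name (illustration only)."""
    return {"name": name, "color": "#000000", "isGroup": False}


if __name__ == "__main__":
    mcp.run()  # stdio transport by default
```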
linear_mcp_fast/store_detector.py
CHANGED
@@ -22,6 +22,14 @@ class DetectedStores:
     comments: str | None = None
     projects: str | None = None
     issue_content: str | None = None  # Y.js encoded issue descriptions
+    labels: list[str] | None = None  # Issue labels (team + workspace)
+    initiatives: str | None = None
+    project_statuses: str | None = None
+    cycles: str | None = None
+    documents: str | None = None
+    document_content: str | None = None
+    milestones: str | None = None
+    project_updates: str | None = None


 def _is_issue_record(record: dict[str, Any]) -> bool:
@@ -75,6 +83,66 @@ def _is_issue_content_record(record: dict[str, Any]) -> bool:
     return required.issubset(record.keys())


+def _is_label_record(record: dict[str, Any]) -> bool:
+    """Check if a record looks like a label."""
+    required = {"name", "color", "isGroup"}
+    return required.issubset(record.keys())
+
+
+def _is_initiative_record(record: dict[str, Any]) -> bool:
+    """Check if a record looks like an initiative."""
+    required = {"name", "ownerId", "slugId", "frequencyResolution"}
+    return required.issubset(record.keys())
+
+
+def _is_project_status_record(record: dict[str, Any]) -> bool:
+    """Check if a record looks like a project status."""
+    if not {"name", "color", "position", "type", "indefinite"}.issubset(record.keys()):
+        return False
+    # Must not have teamId (that's workflow state)
+    return "teamId" not in record
+
+
+def _is_cycle_record(record: dict[str, Any]) -> bool:
+    """Check if a record looks like a cycle."""
+    required = {"number", "teamId", "startsAt", "endsAt"}
+    return required.issubset(record.keys())
+
+
+def _is_document_record(record: dict[str, Any]) -> bool:
+    """Check if a record looks like a document."""
+    required = {"title", "slugId", "projectId", "sortOrder"}
+    has_required = required.issubset(record.keys())
+    # Must not be an issue
+    not_issue = "number" not in record and "stateId" not in record
+    return has_required and not_issue
+
+
+def _is_document_content_record(record: dict[str, Any]) -> bool:
+    """Check if a record looks like document content."""
+    required = {"documentContentId", "contentData"}
+    return required.issubset(record.keys())
+
+
+def _is_milestone_record(record: dict[str, Any]) -> bool:
+    """Check if a record looks like a project milestone."""
+    required = {"name", "projectId", "sortOrder"}
+    has_required = required.issubset(record.keys())
+    # May have targetDate, currentProgress
+    has_progress = "currentProgress" in record or "targetDate" in record
+    return has_required and has_progress
+
+
+def _is_project_update_record(record: dict[str, Any]) -> bool:
+    """Check if a record looks like a project update."""
+    # Has body and either projectId or health field
+    has_body = "body" in record
+    has_project = "projectId" in record or "health" in record
+    # Must not be a comment
+    not_comment = "issueId" not in record
+    return has_body and has_project and not_comment
+
+
 def detect_stores(db: ccl_chromium_indexeddb.WrappedDatabase) -> DetectedStores:
     """
     Detect object stores by sampling their first record.
@@ -85,7 +153,7 @@ def detect_stores(db: ccl_chromium_indexeddb.WrappedDatabase) -> DetectedStores:
     Returns:
         DetectedStores with detected store names for each entity type.
     """
-    result = DetectedStores(users=[], workflow_states=[])
+    result = DetectedStores(users=[], workflow_states=[], labels=[])

     for store_name in db.object_store_names:
         if store_name is None or store_name.startswith("_") or "_partial" in store_name:
@@ -118,6 +186,24 @@ def detect_stores(db: ccl_chromium_indexeddb.WrappedDatabase) -> DetectedStores:
                     result.projects = store_name
                 elif _is_issue_content_record(val) and result.issue_content is None:
                     result.issue_content = store_name
+                elif _is_label_record(val) and store_name not in (result.labels or []):
+                    if result.labels is None:
+                        result.labels = []
+                    result.labels.append(store_name)
+                elif _is_initiative_record(val) and result.initiatives is None:
+                    result.initiatives = store_name
+                elif _is_project_status_record(val) and result.project_statuses is None:
+                    result.project_statuses = store_name
+                elif _is_cycle_record(val) and result.cycles is None:
+                    result.cycles = store_name
+                elif _is_document_record(val) and result.documents is None:
+                    result.documents = store_name
+                elif _is_document_content_record(val) and result.document_content is None:
+                    result.document_content = store_name
+                elif _is_milestone_record(val) and result.milestones is None:
+                    result.milestones = store_name
+                elif _is_project_update_record(val) and result.project_updates is None:
+                    result.project_updates = store_name

                 break  # Only check first record
         except Exception:
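Store detection stays purely shape-based: `detect_stores` samples the first record of each object store and matches it against key-set predicates like the ones added here, so no hard-coded store names are needed. A small self-contained sketch of that heuristic, applied to made-up records:

```python
# Sketch of the key-set heuristic behind the _is_*_record predicates above.
# The sample record is made up; real ones come from Linear's IndexedDB object stores.
from typing import Any


def is_cycle_record(record: dict[str, Any]) -> bool:
    # Same rule as _is_cycle_record: all required keys must be present.
    return {"number", "teamId", "startsAt", "endsAt"}.issubset(record.keys())


def is_milestone_record(record: dict[str, Any]) -> bool:
    # Milestones need name/projectId/sortOrder plus at least one progress-related field.
    has_required = {"name", "projectId", "sortOrder"}.issubset(record.keys())
    return has_required and ("currentProgress" in record or "targetDate" in record)


sample = {"number": 42, "teamId": "t1", "startsAt": "2024-01-01", "endsAt": "2024-01-14"}
print(is_cycle_record(sample), is_milestone_record(sample))  # -> True False
```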
{linear_mcp_fast-0.3.1.dist-info → linear_mcp_fast-0.4.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: linear-mcp-fast
-Version: 0.3.1
+Version: 0.4.0
 Summary: Fast MCP server for Linear - reads from Linear.app's local cache on macOS
 Author: everything-chalna
 License-Expression: MIT
@@ -48,6 +48,7 @@ Linear.app (Electron) syncs all your data to a local IndexedDB. This MCP server
 - **Offline access** - Works without internet
 - **Faster iteration** - No rate limits, no latency
 - **Issue descriptions** - Extracts text from Y.js encoded content (v0.3.0+)
+- **All workspaces** - Reads from all Linear workspaces on your machine (v0.4.0+)

 ## Requirements

@@ -138,6 +139,15 @@ Tools mirror the official Linear MCP for easy switching:
 | `list_users` | List all users |
 | `get_user` | Get user details |
 | `list_issue_statuses` | List workflow states for a team |
+| `list_comments` | List comments for an issue |
+| `list_issue_labels` | List available issue labels |
+| `list_initiatives` | List all initiatives |
+| `get_initiative` | Get initiative details |
+| `list_cycles` | List cycles for a team |
+| `list_documents` | List documents (optionally by project) |
+| `get_document` | Get document details |
+| `list_milestones` | List milestones for a project |
+| `list_project_updates` | List updates for a project |

 For writes (create issue, add comment, update status), use the official Linear MCP.

{linear_mcp_fast-0.3.1.dist-info → linear_mcp_fast-0.4.0.dist-info}/RECORD
CHANGED
@@ -23,17 +23,17 @@ ccl_simplesnappy/__init__.py,sha256=OqArK0MfdVl2oMw1MwpWSYWWVLFT-VLIWnEXXCsacJo,
 ccl_simplesnappy/ccl_simplesnappy.py,sha256=dLv1wejr2vCa2b_ZinozXVfcSsZIzJrt5ZkyxA3cQXA,10461
 linear_mcp_fast/__init__.py,sha256=T-ioPzoZXC3a_zLilZuDBCXUn8nDjvITS7svk7BwWmY,138
 linear_mcp_fast/__main__.py,sha256=2wkhXADcE2oGdtEpGrIvvEe9YGKjpwnJ3DBWghkVQKk,124
-linear_mcp_fast/reader.py,sha256=
-linear_mcp_fast/server.py,sha256=
-linear_mcp_fast/store_detector.py,sha256=
+linear_mcp_fast/reader.py,sha256=gLNy8VBL5NNjvRJvvlcoUkHPaIf_X_rxASm_sR62WyU,28641
+linear_mcp_fast/server.py,sha256=QUXpIJGGk8oatN5jKMImcN_zfvYi_okr4m5mw_8iJ5U,18801
+linear_mcp_fast/store_detector.py,sha256=nUaX6HGSS_L4lqXHYo6aw6979O_7qR1sAHP_KybWLZg,8375
 tools_and_utilities/Chromium_dump_local_storage.py,sha256=gG-pKFFk6lo332LQy2JvInlQh9Zldm5zAsuibb-dBkQ,4337
 tools_and_utilities/Chromium_dump_session_storage.py,sha256=17BKFWioo6fPwYkH58QycJCA-z85RtWMBXsJ_29hHQs,3484
 tools_and_utilities/benchmark.py,sha256=fyD5U6yI7Y0TkyhYtvvaHyk9Y2jJe2yxYWoFPQWypzA,1089
 tools_and_utilities/ccl_chrome_audit.py,sha256=irGyYJae0apZDZCn23jMKmY3tYQgWyZEL8vdUBcHLZk,24695
 tools_and_utilities/dump_indexeddb_details.py,sha256=ipNWLKPQoSNhCtPHKWvMWpKu8FhCnvc4Rciyx-90boI,2298
 tools_and_utilities/dump_leveldb.py,sha256=hj7QnOHG64KK2fKsZ9qQOVqUUmHUtxUZqPYl4EZJO9U,1882
-linear_mcp_fast-0.
-linear_mcp_fast-0.
-linear_mcp_fast-0.
-linear_mcp_fast-0.
-linear_mcp_fast-0.
+linear_mcp_fast-0.4.0.dist-info/METADATA,sha256=UE51XV2JDYjhpqtb07leypxr1UUtPGxd4mv7lDQ6WSc,5829
+linear_mcp_fast-0.4.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+linear_mcp_fast-0.4.0.dist-info/entry_points.txt,sha256=Aa98tAkWz_08mS_SRyfyx0k3PuMBQoMygT88HCKMyWk,57
+linear_mcp_fast-0.4.0.dist-info/top_level.txt,sha256=j-O2BoBpFBpGyTl2V1cp0ZjxZAQwpkweeNxG4BcQ7io,73
+linear_mcp_fast-0.4.0.dist-info/RECORD,,
{linear_mcp_fast-0.3.1.dist-info → linear_mcp_fast-0.4.0.dist-info}/WHEEL
File without changes

{linear_mcp_fast-0.3.1.dist-info → linear_mcp_fast-0.4.0.dist-info}/entry_points.txt
File without changes

{linear_mcp_fast-0.3.1.dist-info → linear_mcp_fast-0.4.0.dist-info}/top_level.txt
File without changes