linear-mcp-fast 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ccl_chromium_reader/__init__.py +2 -0
- ccl_chromium_reader/ccl_chromium_cache.py +1335 -0
- ccl_chromium_reader/ccl_chromium_filesystem.py +302 -0
- ccl_chromium_reader/ccl_chromium_history.py +357 -0
- ccl_chromium_reader/ccl_chromium_indexeddb.py +1060 -0
- ccl_chromium_reader/ccl_chromium_localstorage.py +454 -0
- ccl_chromium_reader/ccl_chromium_notifications.py +268 -0
- ccl_chromium_reader/ccl_chromium_profile_folder.py +568 -0
- ccl_chromium_reader/ccl_chromium_sessionstorage.py +368 -0
- ccl_chromium_reader/ccl_chromium_snss2.py +332 -0
- ccl_chromium_reader/ccl_shared_proto_db_downloads.py +189 -0
- ccl_chromium_reader/common.py +19 -0
- ccl_chromium_reader/download_common.py +78 -0
- ccl_chromium_reader/profile_folder_protocols.py +276 -0
- ccl_chromium_reader/serialization_formats/__init__.py +0 -0
- ccl_chromium_reader/serialization_formats/ccl_blink_value_deserializer.py +401 -0
- ccl_chromium_reader/serialization_formats/ccl_easy_chromium_pickle.py +133 -0
- ccl_chromium_reader/serialization_formats/ccl_protobuff.py +276 -0
- ccl_chromium_reader/serialization_formats/ccl_v8_value_deserializer.py +627 -0
- ccl_chromium_reader/storage_formats/__init__.py +0 -0
- ccl_chromium_reader/storage_formats/ccl_leveldb.py +582 -0
- ccl_simplesnappy/__init__.py +1 -0
- ccl_simplesnappy/ccl_simplesnappy.py +306 -0
- linear_mcp_fast/__init__.py +8 -0
- linear_mcp_fast/__main__.py +6 -0
- linear_mcp_fast/reader.py +433 -0
- linear_mcp_fast/server.py +367 -0
- linear_mcp_fast/store_detector.py +117 -0
- linear_mcp_fast-0.1.0.dist-info/METADATA +160 -0
- linear_mcp_fast-0.1.0.dist-info/RECORD +39 -0
- linear_mcp_fast-0.1.0.dist-info/WHEEL +5 -0
- linear_mcp_fast-0.1.0.dist-info/entry_points.txt +2 -0
- linear_mcp_fast-0.1.0.dist-info/top_level.txt +4 -0
- tools_and_utilities/Chromium_dump_local_storage.py +111 -0
- tools_and_utilities/Chromium_dump_session_storage.py +92 -0
- tools_and_utilities/benchmark.py +35 -0
- tools_and_utilities/ccl_chrome_audit.py +651 -0
- tools_and_utilities/dump_indexeddb_details.py +59 -0
- tools_and_utilities/dump_leveldb.py +53 -0
|
@@ -0,0 +1,433 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Linear Local Data Reader with TTL-based caching.
|
|
3
|
+
|
|
4
|
+
Reads Linear's local IndexedDB cache to provide fast access to issues, users,
|
|
5
|
+
teams, workflow states, and comments without API calls.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
import os
|
|
10
|
+
import time
|
|
11
|
+
from dataclasses import dataclass, field
|
|
12
|
+
from typing import Any
|
|
13
|
+
|
|
14
|
+
from ccl_chromium_reader import ccl_chromium_indexeddb # type: ignore
|
|
15
|
+
|
|
16
|
+
from .store_detector import DetectedStores, detect_stores
|
|
17
|
+
|
|
18
|
+
# Default locations of Linear.app's Chromium IndexedDB store on macOS
# (paths follow Chromium's "<origin>.indexeddb.leveldb" naming scheme).
LINEAR_DB_PATH = os.path.expanduser(
    "~/Library/Application Support/Linear/IndexedDB/https_linear.app_0.indexeddb.leveldb"
)
# Companion blob directory where IndexedDB externalizes large values.
LINEAR_BLOB_PATH = os.path.expanduser(
    "~/Library/Application Support/Linear/IndexedDB/https_linear.app_0.indexeddb.blob"
)

# How long (seconds) in-memory data is considered fresh before a reload.
CACHE_TTL_SECONDS = 300  # 5 minutes
|
28
|
+
@dataclass
class CachedData:
    """In-memory snapshot of Linear entities, keyed by entity id."""

    # Each mapping goes from an entity's UUID to a plain-dict projection of it.
    teams: dict[str, dict[str, Any]] = field(default_factory=dict)
    users: dict[str, dict[str, Any]] = field(default_factory=dict)
    states: dict[str, dict[str, Any]] = field(default_factory=dict)
    issues: dict[str, dict[str, Any]] = field(default_factory=dict)
    comments: dict[str, dict[str, Any]] = field(default_factory=dict)
    # Secondary index: issue id -> ids of the comments attached to it.
    comments_by_issue: dict[str, list[str]] = field(default_factory=dict)
    projects: dict[str, dict[str, Any]] = field(default_factory=dict)
    # Epoch seconds at which this snapshot was populated (0.0 = never loaded).
    loaded_at: float = 0.0

    def is_expired(self) -> bool:
        """Return True once the snapshot is older than CACHE_TTL_SECONDS."""
        age_seconds = time.time() - self.loaded_at
        return age_seconds > CACHE_TTL_SECONDS
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class LinearLocalReader:
    """
    Reader for Linear's local IndexedDB cache.

    Provides fast, local-only access to Linear data without API calls.
    Data is cached in memory with a TTL of CACHE_TTL_SECONDS (5 minutes).
    """

    def __init__(
        self, db_path: str = LINEAR_DB_PATH, blob_path: str = LINEAR_BLOB_PATH
    ):
        """
        Args:
            db_path: LevelDB directory backing Linear's IndexedDB.
            blob_path: Companion blob directory for externalized values.
        """
        self._db_path = db_path
        self._blob_path = blob_path
        self._cache = CachedData()
        # Store names are detected lazily on the first load, then reused.
        self._stores: DetectedStores | None = None

    def _check_db_exists(self) -> None:
        """Verify the Linear database exists.

        Raises:
            FileNotFoundError: if the LevelDB directory is missing.
        """
        if not os.path.exists(self._db_path):
            raise FileNotFoundError(
                f"Linear database not found at {self._db_path}. "
                "Please ensure Linear.app is installed and has been opened at least once."
            )

    def _get_wrapper(self) -> ccl_chromium_indexeddb.WrappedIndexDB:
        """Get an IndexedDB wrapper instance over the on-disk database."""
        self._check_db_exists()
        return ccl_chromium_indexeddb.WrappedIndexDB(self._db_path, self._blob_path)

    def _find_linear_db(
        self, wrapper: ccl_chromium_indexeddb.WrappedIndexDB
    ) -> ccl_chromium_indexeddb.WrappedDatabase:
        """Find the main Linear database.

        The workspace database's name contains "linear_"; the separate
        "linear_databases" bookkeeping database is explicitly skipped.

        Raises:
            ValueError: if no matching database is found.
        """
        for db_id in wrapper.database_ids:
            if "linear_" in db_id.name and db_id.name != "linear_databases":
                return wrapper[db_id.name, db_id.origin]
        raise ValueError("Could not find Linear database in IndexedDB")

    def _to_str(self, val: Any) -> str:
        """Convert value to string, handling bytes and None."""
        if val is None:
            return ""
        if isinstance(val, bytes):
            # IndexedDB strings may surface as raw bytes; decode leniently.
            return val.decode("utf-8", errors="replace")
        return str(val)

    def _extract_comment_text(self, body_data: Any) -> str:
        """Extract plain text from ProseMirror bodyData format.

        Accepts a dict/list document tree or a JSON string of one; a string
        that is not valid JSON is returned as-is. Returns "" for None.
        """
        if body_data is None:
            return ""
        if isinstance(body_data, str):
            try:
                body_data = json.loads(body_data)
            except json.JSONDecodeError:
                return body_data

        def extract(node: Any) -> str:
            # Depth-first concatenation of text-bearing nodes.
            if isinstance(node, dict):
                node_type = node.get("type", "")
                if node_type == "text":
                    return node.get("text", "")
                if node_type == "suggestion_userMentions":
                    label = node.get("attrs", {}).get("label", "")
                    return f"@{label}" if label else ""
                if node_type == "hardBreak":
                    return "\n"
                content = node.get("content", [])
                return "".join(extract(c) for c in content)
            elif isinstance(node, list):
                return "".join(extract(c) for c in node)
            return ""

        return extract(body_data)

    def _load_from_store(
        self, db: ccl_chromium_indexeddb.WrappedDatabase, store_name: str
    ):
        """Yield every non-empty record value from a store.

        NOTE: the broad except is deliberate best-effort — a missing or
        partially-written store yields nothing instead of failing the load.
        """
        try:
            store = db[store_name]
            for record in store.iterate_records():
                if record.value:
                    yield record.value
        except Exception:
            pass

    def _load_teams(
        self, db: ccl_chromium_indexeddb.WrappedDatabase, cache: CachedData
    ) -> None:
        """Populate cache.teams from the detected team store."""
        if not (self._stores and self._stores.teams):
            return
        for val in self._load_from_store(db, self._stores.teams):
            team_id = val.get("id")
            if not team_id:
                continue  # skip malformed records instead of raising KeyError
            cache.teams[team_id] = {
                "id": team_id,
                "key": val.get("key"),
                "name": val.get("name"),
            }

    def _load_users(
        self, db: ccl_chromium_indexeddb.WrappedDatabase, cache: CachedData
    ) -> None:
        """Populate cache.users from every detected user store (first wins)."""
        if not (self._stores and self._stores.users):
            return
        for store_name in self._stores.users:
            for val in self._load_from_store(db, store_name):
                user_id = val.get("id")
                if not user_id or user_id in cache.users:
                    continue
                cache.users[user_id] = {
                    "id": user_id,
                    "name": val.get("name"),
                    "displayName": val.get("displayName"),
                    "email": val.get("email"),
                }

    def _load_states(
        self, db: ccl_chromium_indexeddb.WrappedDatabase, cache: CachedData
    ) -> None:
        """Populate cache.states from every detected workflow-state store."""
        if not (self._stores and self._stores.workflow_states):
            return
        for store_name in self._stores.workflow_states:
            for val in self._load_from_store(db, store_name):
                state_id = val.get("id")
                if not state_id or state_id in cache.states:
                    continue
                cache.states[state_id] = {
                    "id": state_id,
                    "name": val.get("name"),
                    "type": val.get("type"),
                    "color": val.get("color"),
                }

    def _load_issues(
        self, db: ccl_chromium_indexeddb.WrappedDatabase, cache: CachedData
    ) -> None:
        """Populate cache.issues. Requires cache.teams for identifier keys."""
        if not (self._stores and self._stores.issues):
            return
        for val in self._load_from_store(db, self._stores.issues):
            issue_id = val.get("id")
            if not issue_id:
                continue
            team = cache.teams.get(val.get("teamId"), {})
            # "or" so a team whose key was stored as None still yields "???".
            team_key = team.get("key") or "???"
            identifier = f"{team_key}-{val.get('number')}"

            cache.issues[issue_id] = {
                "id": issue_id,
                "identifier": identifier,
                "title": val.get("title"),
                "description": val.get("description"),
                "number": val.get("number"),
                "priority": val.get("priority"),
                "estimate": val.get("estimate"),
                "teamId": val.get("teamId"),
                "stateId": val.get("stateId"),
                "assigneeId": val.get("assigneeId"),
                "projectId": val.get("projectId"),
                "labelIds": val.get("labelIds", []),
                "dueDate": val.get("dueDate"),
                "createdAt": val.get("createdAt"),
                "updatedAt": val.get("updatedAt"),
            }

    def _load_comments(
        self, db: ccl_chromium_indexeddb.WrappedDatabase, cache: CachedData
    ) -> None:
        """Populate cache.comments and the per-issue comment index."""
        if not (self._stores and self._stores.comments):
            return
        for val in self._load_from_store(db, self._stores.comments):
            comment_id = val.get("id")
            issue_id = val.get("issueId")
            if not comment_id or not issue_id:
                continue

            cache.comments[comment_id] = {
                "id": comment_id,
                "issueId": issue_id,
                "userId": val.get("userId"),
                "body": self._extract_comment_text(val.get("bodyData")),
                "createdAt": val.get("createdAt"),
                "updatedAt": val.get("updatedAt"),
            }
            cache.comments_by_issue.setdefault(issue_id, []).append(comment_id)

    def _load_projects(
        self, db: ccl_chromium_indexeddb.WrappedDatabase, cache: CachedData
    ) -> None:
        """Populate cache.projects from the detected project store."""
        if not (self._stores and self._stores.projects):
            return
        for val in self._load_from_store(db, self._stores.projects):
            project_id = val.get("id")
            if not project_id:
                continue
            cache.projects[project_id] = {
                "id": project_id,
                "name": val.get("name"),
                "description": val.get("description"),
                "slugId": val.get("slugId"),
                "icon": val.get("icon"),
                "color": val.get("color"),
                "state": val.get("state"),
                "statusId": val.get("statusId"),
                "priority": val.get("priority"),
                "teamIds": val.get("teamIds", []),
                "memberIds": val.get("memberIds", []),
                "leadId": val.get("leadId"),
                "startDate": val.get("startDate"),
                "targetDate": val.get("targetDate"),
                "createdAt": val.get("createdAt"),
                "updatedAt": val.get("updatedAt"),
            }

    def _reload_cache(self) -> None:
        """Rebuild the in-memory cache from the on-disk IndexedDB."""
        wrapper = self._get_wrapper()
        db = self._find_linear_db(wrapper)

        # Detect stores only once; the schema is stable across reloads.
        if self._stores is None:
            self._stores = detect_stores(db)

        cache = CachedData(loaded_at=time.time())
        self._load_teams(db, cache)
        self._load_users(db, cache)
        self._load_states(db, cache)
        # Teams must be loaded before issues: identifiers use the team key.
        self._load_issues(db, cache)
        self._load_comments(db, cache)
        self._load_projects(db, cache)

        # Swap in only after a complete, successful load.
        self._cache = cache

    def _ensure_cache(self) -> CachedData:
        """Ensure the cache is loaded and not expired."""
        # An empty teams map is treated as "never loaded" and forces a reload.
        if self._cache.is_expired() or not self._cache.teams:
            self._reload_cache()
        return self._cache

    @property
    def teams(self) -> dict[str, dict[str, Any]]:
        """Get all teams, keyed by team id."""
        return self._ensure_cache().teams

    @property
    def users(self) -> dict[str, dict[str, Any]]:
        """Get all users, keyed by user id."""
        return self._ensure_cache().users

    @property
    def states(self) -> dict[str, dict[str, Any]]:
        """Get all workflow states, keyed by state id."""
        return self._ensure_cache().states

    @property
    def issues(self) -> dict[str, dict[str, Any]]:
        """Get all issues, keyed by issue id."""
        return self._ensure_cache().issues

    @property
    def comments(self) -> dict[str, dict[str, Any]]:
        """Get all comments, keyed by comment id."""
        return self._ensure_cache().comments

    @property
    def projects(self) -> dict[str, dict[str, Any]]:
        """Get all projects, keyed by project id."""
        return self._ensure_cache().projects

    def get_comments_for_issue(self, issue_id: str) -> list[dict[str, Any]]:
        """Get all comments for an issue, sorted by creation time."""
        cache = self._ensure_cache()
        comment_ids = cache.comments_by_issue.get(issue_id, [])
        comments = [cache.comments[cid] for cid in comment_ids if cid in cache.comments]
        # "or" (not a .get default) so comments whose createdAt was stored as
        # None still sort first instead of raising TypeError against strings.
        return sorted(comments, key=lambda c: c.get("createdAt") or "")

    def find_user(self, search: str) -> dict[str, Any] | None:
        """Find a user by name or display name (case-insensitive partial match).

        Matches are ranked: name prefix (100) > word-boundary hit in name (50)
        > display-name prefix (40) > any other substring hit (10).
        """
        search_lower = search.lower()
        candidates: list[tuple[int, dict[str, Any]]] = []

        for user in self.users.values():
            name = self._to_str(user.get("name", ""))
            display_name = self._to_str(user.get("displayName", ""))

            name_lower = name.lower()
            display_lower = display_name.lower()

            if search_lower in name_lower or search_lower in display_lower:
                score = 0
                if name_lower.startswith(search_lower):
                    score = 100
                elif f" {search_lower}" in f" {name_lower}":
                    score = 50
                elif display_lower.startswith(search_lower):
                    score = 40
                else:
                    score = 10

                candidates.append((score, user))

        if candidates:
            # Stable sort keeps cache order among equal scores.
            candidates.sort(key=lambda x: -x[0])
            return candidates[0][1]
        return None

    def find_team(self, search: str) -> dict[str, Any] | None:
        """Find a team by exact key or name substring (case-insensitive)."""
        search_lower = search.lower()
        search_upper = search.upper()

        for team in self.teams.values():
            key = team.get("key", "")
            name = self._to_str(team.get("name", ""))

            if key == search_upper or search_lower in name.lower():
                return team
        return None

    def get_issue_by_identifier(self, identifier: str) -> dict[str, Any] | None:
        """Get an issue by its identifier (e.g., 'UK-1234')."""
        identifier_upper = identifier.upper()
        for issue in self.issues.values():
            if issue.get("identifier", "").upper() == identifier_upper:
                return issue
        return None

    def find_project(self, search: str) -> dict[str, Any] | None:
        """Find a project by name or slugId (case-insensitive partial match).

        Matches are ranked: exact name (100) > name prefix (80) > exact slug
        (70) > any other name substring hit (10).
        """
        search_lower = search.lower()
        candidates: list[tuple[int, dict[str, Any]]] = []

        for project in self.projects.values():
            name = self._to_str(project.get("name", ""))
            slug_id = self._to_str(project.get("slugId", ""))

            name_lower = name.lower()
            slug_lower = slug_id.lower()

            if search_lower in name_lower or search_lower == slug_lower:
                score = 0
                if name_lower == search_lower:
                    score = 100
                elif name_lower.startswith(search_lower):
                    score = 80
                elif slug_lower == search_lower:
                    score = 70
                else:
                    score = 10

                candidates.append((score, project))

        if candidates:
            candidates.sort(key=lambda x: -x[0])
            return candidates[0][1]
        return None

    def get_issues_for_user(self, user_id: str) -> list[dict[str, Any]]:
        """Get all issues assigned to a user."""
        return [
            issue
            for issue in self.issues.values()
            if issue.get("assigneeId") == user_id
        ]

    def get_state_name(self, state_id: str) -> str:
        """Get state name from state ID; "Unknown" when absent or None."""
        state = self.states.get(state_id, {})
        # "or" so a name stored as None still falls back to "Unknown".
        return state.get("name") or "Unknown"

    def get_state_type(self, state_id: str) -> str:
        """Get state type from state ID; "unknown" when absent or None."""
        state = self.states.get(state_id, {})
        return state.get("type") or "unknown"

    def search_issues(self, query: str, limit: int = 50) -> list[dict[str, Any]]:
        """Search issues by title substring (case-insensitive), up to limit."""
        query_lower = query.lower()
        results = []

        for issue in self.issues.values():
            title = self._to_str(issue.get("title", ""))
            if query_lower in title.lower():
                results.append(issue)
                if len(results) >= limit:
                    break

        return results

    def get_summary(self) -> dict[str, int]:
        """Get a summary of loaded data counts per entity kind."""
        cache = self._ensure_cache()
        return {
            "teams": len(cache.teams),
            "users": len(cache.users),
            "states": len(cache.states),
            "issues": len(cache.issues),
            "comments": len(cache.comments),
            "projects": len(cache.projects),
        }

    def get_user_name(self, user_id: str | None) -> str:
        """Get user name from user ID; "Unassigned"/"Unknown" fallbacks."""
        if not user_id:
            return "Unassigned"
        user = self.users.get(user_id, {})
        return user.get("name") or user.get("displayName") or "Unknown"

    def get_team_key(self, team_id: str | None) -> str:
        """Get team key from team ID; "???" when absent or None."""
        if not team_id:
            return "???"
        team = self.teams.get(team_id, {})
        return team.get("key") or "???"

    def get_project_name(self, project_id: str | None) -> str:
        """Get project name from project ID; "" when absent or None."""
        if not project_id:
            return ""
        project = self.projects.get(project_id, {})
        return project.get("name") or ""