membot 0.5.2 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/.claude/skills/membot.md +25 -10
  2. package/.cursor/rules/membot.mdc +25 -10
  3. package/README.md +36 -4
  4. package/package.json +8 -5
  5. package/scripts/apply-patches.sh +0 -11
  6. package/scripts/build-test-docx.ts +84 -0
  7. package/src/cli.ts +2 -2
  8. package/src/commands/login-page.mustache +50 -0
  9. package/src/commands/login.ts +83 -0
  10. package/src/config/schemas.ts +23 -5
  11. package/src/constants.ts +20 -1
  12. package/src/context.ts +1 -24
  13. package/src/db/files.ts +21 -25
  14. package/src/db/migrations/003-downloader-columns.ts +58 -0
  15. package/src/db/migrations.ts +2 -1
  16. package/src/ingest/converter/docx.ts +47 -5
  17. package/src/ingest/converter/html.ts +10 -3
  18. package/src/ingest/converter/image.ts +40 -3
  19. package/src/ingest/converter/images-inline.ts +132 -0
  20. package/src/ingest/converter/index.ts +13 -3
  21. package/src/ingest/converter/xlsx.ts +111 -0
  22. package/src/ingest/downloaders/browser.ts +180 -0
  23. package/src/ingest/downloaders/generic-web.ts +81 -0
  24. package/src/ingest/downloaders/github.ts +178 -0
  25. package/src/ingest/downloaders/google-docs.ts +56 -0
  26. package/src/ingest/downloaders/google-shared.ts +86 -0
  27. package/src/ingest/downloaders/google-sheets.ts +58 -0
  28. package/src/ingest/downloaders/google-slides.ts +53 -0
  29. package/src/ingest/downloaders/index.ts +182 -0
  30. package/src/ingest/downloaders/linear.ts +291 -0
  31. package/src/ingest/fetcher.ts +104 -129
  32. package/src/ingest/ingest.ts +44 -71
  33. package/src/mcp/instructions.ts +4 -2
  34. package/src/operations/add.ts +6 -4
  35. package/src/operations/info.ts +4 -6
  36. package/src/operations/move.ts +2 -3
  37. package/src/operations/refresh.ts +2 -4
  38. package/src/operations/remove.ts +23 -2
  39. package/src/operations/tree.ts +1 -1
  40. package/src/operations/types.ts +1 -1
  41. package/src/refresh/runner.ts +60 -115
  42. package/src/types/text-modules.d.ts +5 -0
  43. package/patches/@evantahler%2Fmcpx@0.21.4.patch +0 -51
  44. package/src/commands/mcpx.ts +0 -112
  45. package/src/ingest/agent-fetcher.ts +0 -639
@@ -0,0 +1,58 @@
1
+ import { HelpfulError } from "../../errors.ts";
2
+ import { sha256Hex } from "../local-reader.ts";
3
+ import { fetchWithBrowserCookies } from "./google-shared.ts";
4
+ import type { DownloadedRemote, Downloader } from "./index.ts";
5
+
6
+ const SHEET_PATH = /^\/spreadsheets\/d\/([a-zA-Z0-9_-]+)/;
7
+
8
+ const XLSX_MIME = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet";
9
+
10
+ /**
11
+ * Download a Google Sheet as `.xlsx` (the workbook's native format)
12
+ * — the export includes **every tab** in a single file. The bytes
13
+ * flow through `convertXlsx`, which renders each tab as a markdown
14
+ * `## <tab name>` section with a real GitHub-flavored pipe table.
15
+ * Cleaner than the PDF route (preserves cell structure, no layout
16
+ * truncation) and `format=html` is no longer supported by Google.
17
+ */
18
+ export const googleSheetsDownloader: Downloader = {
19
+ name: "google-sheets",
20
+ description:
21
+ "Google Sheets (docs.google.com/spreadsheets/d/<id>) — exports every tab as .xlsx, rendered to markdown tables locally.",
22
+ logins: [
23
+ {
24
+ kind: "browser",
25
+ name: "Google",
26
+ url: "https://accounts.google.com/signin",
27
+ description: "covers Docs, Sheets, and Slides",
28
+ },
29
+ ],
30
+ matches(url) {
31
+ return url.hostname === "docs.google.com" && SHEET_PATH.test(url.pathname);
32
+ },
33
+ async download(url, ctx): Promise<DownloadedRemote> {
34
+ const sheetId = extractSheetId(url);
35
+ const exportUrl = `https://docs.google.com/spreadsheets/d/${sheetId}/export?format=xlsx`;
36
+ const body = await fetchWithBrowserCookies(exportUrl, ctx, "Google Sheets", url);
37
+ return {
38
+ bytes: new Uint8Array(body),
39
+ sha256: sha256Hex(body),
40
+ mimeType: XLSX_MIME,
41
+ downloader: "google-sheets",
42
+ downloaderArgs: { sheet_id: sheetId },
43
+ sourceUrl: url.toString(),
44
+ };
45
+ },
46
+ };
47
+
48
+ function extractSheetId(url: URL): string {
49
+ const match = url.pathname.match(SHEET_PATH);
50
+ if (!match || !match[1]) {
51
+ throw new HelpfulError({
52
+ kind: "input_error",
53
+ message: `not a Google Sheets URL: ${url.toString()}`,
54
+ hint: "Pass a URL like https://docs.google.com/spreadsheets/d/<SHEET_ID>/edit.",
55
+ });
56
+ }
57
+ return match[1];
58
+ }
@@ -0,0 +1,53 @@
1
+ import { HelpfulError } from "../../errors.ts";
2
+ import { sha256Hex } from "../local-reader.ts";
3
+ import { fetchWithBrowserCookies } from "./google-shared.ts";
4
+ import type { DownloadedRemote, Downloader } from "./index.ts";
5
+
6
+ const SLIDE_PATH = /^\/presentation\/d\/([a-zA-Z0-9_-]+)/;
7
+
8
+ /**
9
+ * Download a Google Slides deck as a PDF via the canonical export
10
+ * endpoint. PDF preserves layout and text-on-slides faithfully; the
11
+ * existing `convertPdf` pipeline (unpdf) extracts the speaker text +
12
+ * bullets without losing slide ordering.
13
+ */
14
+ export const googleSlidesDownloader: Downloader = {
15
+ name: "google-slides",
16
+ description: "Google Slides (docs.google.com/presentation/d/<id>) — exports as PDF for layout-faithful conversion.",
17
+ logins: [
18
+ {
19
+ kind: "browser",
20
+ name: "Google",
21
+ url: "https://accounts.google.com/signin",
22
+ description: "covers Docs, Sheets, and Slides",
23
+ },
24
+ ],
25
+ matches(url) {
26
+ return url.hostname === "docs.google.com" && SLIDE_PATH.test(url.pathname);
27
+ },
28
+ async download(url, ctx): Promise<DownloadedRemote> {
29
+ const slidesId = extractSlidesId(url);
30
+ const exportUrl = `https://docs.google.com/presentation/d/${slidesId}/export/pdf`;
31
+ const body = await fetchWithBrowserCookies(exportUrl, ctx, "Google Slides", url);
32
+ return {
33
+ bytes: new Uint8Array(body),
34
+ sha256: sha256Hex(body),
35
+ mimeType: "application/pdf",
36
+ downloader: "google-slides",
37
+ downloaderArgs: { slides_id: slidesId },
38
+ sourceUrl: url.toString(),
39
+ };
40
+ },
41
+ };
42
+
43
+ function extractSlidesId(url: URL): string {
44
+ const match = url.pathname.match(SLIDE_PATH);
45
+ if (!match || !match[1]) {
46
+ throw new HelpfulError({
47
+ kind: "input_error",
48
+ message: `not a Google Slides URL: ${url.toString()}`,
49
+ hint: "Pass a URL like https://docs.google.com/presentation/d/<SLIDES_ID>/edit.",
50
+ });
51
+ }
52
+ return match[1];
53
+ }
@@ -0,0 +1,182 @@
1
+ import type { MembotConfig } from "../../config/schemas.ts";
2
+ import type { logger as Logger } from "../../output/logger.ts";
3
+ import type { BrowserPool } from "./browser.ts";
4
+ import { genericWebDownloader } from "./generic-web.ts";
5
+ import { githubDownloader } from "./github.ts";
6
+ import { googleDocsDownloader } from "./google-docs.ts";
7
+ import { googleSheetsDownloader } from "./google-sheets.ts";
8
+ import { googleSlidesDownloader } from "./google-slides.ts";
9
+ import { linearDownloader } from "./linear.ts";
10
+
11
+ /**
12
+ * The shape every URL fetch produces — drop-in replacement for the
13
+ * old `FetchedRemote` shape. `downloader` + `downloaderArgs` get
14
+ * persisted on the row so refresh replays the same downloader against
15
+ * the same URL deterministically (no LLM, no agent loop).
16
+ */
17
+ export interface DownloadedRemote {
18
+ bytes: Uint8Array;
19
+ sha256: string;
20
+ mimeType: string;
21
+ downloader: string;
22
+ downloaderArgs: Record<string, unknown>;
23
+ sourceUrl: string;
24
+ }
25
+
26
+ export interface DownloaderCtx {
27
+ pool: BrowserPool;
28
+ logger: typeof Logger;
29
+ config: MembotConfig;
30
+ /**
31
+ * Optional sublabel hook for the host's progress spinner. Long-running
32
+ * downloaders (multi-query GraphQL, paginated REST fetches, headless
33
+ * browser navigation) can call this with short status strings —
34
+ * "fetching", "rendering", "parsing 3/4 pages" — and the CLI will
35
+ * surface them under the per-entry progress bar. No-op when the host
36
+ * doesn't supply one (e.g. MCP server, JSON-mode CLI).
37
+ */
38
+ onProgress?: (sublabel: string) => void;
39
+ }
40
+
41
+ /**
42
+ * One tactic for fetching a URL. Specific downloaders (Google,
43
+ * GitHub, Linear) match URLs by host/pattern and hit the canonical
44
+ * export endpoint; the generic-web downloader is the registry's
45
+ * always-matching catch-all (HEADs the URL, prints to PDF if HTML,
46
+ * else streams the raw bytes through). Adding a 6th service is one
47
+ * file — implement `Downloader`, register it here.
48
+ *
49
+ * If a downloader requires a logged-in browser session, it declares
50
+ * one or more `LoginEntry` objects; the `membot login` page collects
51
+ * those across every downloader, dedupes by URL, and renders one
52
+ * button per service.
53
+ */
54
+ export interface Downloader {
55
+ name: string;
56
+ description: string;
57
+ matches(url: URL): boolean;
58
+ download(url: URL, ctx: DownloaderCtx): Promise<DownloadedRemote>;
59
+ logins?: LoginEntry[];
60
+ /**
61
+ * Force the BrowserPool into headed mode for this downloader's
62
+ * fetches. Used for SPAs that detect headless Chromium and refuse
63
+ * to hydrate; we don't currently use it (services that needed it
64
+ * have moved to the API-key flow), but the hook remains for
65
+ * future cookie-based downloaders.
66
+ */
67
+ requireHeaded?: boolean;
68
+ /**
69
+ * The downloader authenticates via a config-stored API key, not
70
+ * browser cookies. The fetcher uses this to skip the auto-login
71
+ * browser prompt on `auth_error` (opening a browser doesn't help
72
+ * when the missing credential is in the config file).
73
+ */
74
+ requiresApiKey?: boolean;
75
+ }
76
+
77
+ /**
78
+ * A service the user might need to set up before fetches against it
79
+ * succeed. Two flavors:
80
+ * - `kind: "browser"` — the user clicks a link in the `membot login`
81
+ * browser, signs in, and closes the window. Cookies + IndexedDB
82
+ * land in the persistent profile and downloaders use them
83
+ * automatically.
84
+ * - `kind: "api_key"` — the user visits the service's API-key page,
85
+ * copies the key, and runs the displayed `setupCommand`. The key
86
+ * lives in `~/.membot/config.json` and downloaders read it from
87
+ * `ctx.config`.
88
+ *
89
+ * Multiple downloaders can declare the same `LoginEntry` (e.g. all
90
+ * three Google downloaders share Google sign-in); the login page
91
+ * dedupes by `(kind, url)`.
92
+ */
93
+ export type LoginEntry = BrowserLoginEntry | ApiKeyLoginEntry;
94
+
95
+ export interface BrowserLoginEntry {
96
+ kind: "browser";
97
+ /** Display name (e.g. "Google"). */
98
+ name: string;
99
+ /** Login URL the button opens. */
100
+ url: string;
101
+ /** Optional one-liner shown next to the button. */
102
+ description?: string;
103
+ }
104
+
105
+ export interface ApiKeyLoginEntry {
106
+ kind: "api_key";
107
+ /** Display name (e.g. "Linear"). */
108
+ name: string;
109
+ /** Settings page where the user creates the key. */
110
+ url: string;
111
+ /** Shell command the user copies — e.g. `membot config set linear.api_key <KEY>`. */
112
+ setupCommand: string;
113
+ /** Optional one-liner shown next to the link. */
114
+ description?: string;
115
+ }
116
+
117
+ const REGISTRY: Downloader[] = [
118
+ googleDocsDownloader,
119
+ googleSheetsDownloader,
120
+ googleSlidesDownloader,
121
+ githubDownloader,
122
+ linearDownloader,
123
+ genericWebDownloader,
124
+ ];
125
+
126
+ /**
127
+ * Find the first downloader that matches `url`. Returns `null` only
128
+ * if `url` doesn't parse — in normal use the generic-web downloader
129
+ * matches everything else, so callers can treat `findDownloader` as
130
+ * total over valid URLs.
131
+ */
132
+ export function findDownloader(url: string | URL): Downloader | null {
133
+ let parsed: URL;
134
+ try {
135
+ parsed = typeof url === "string" ? new URL(url) : url;
136
+ } catch {
137
+ return null;
138
+ }
139
+ for (const d of REGISTRY) {
140
+ if (d.matches(parsed)) return d;
141
+ }
142
+ return null;
143
+ }
144
+
145
+ /** Lookup by name (used by refresh to replay a persisted downloader). */
146
+ export function findDownloaderByName(name: string): Downloader | null {
147
+ return REGISTRY.find((d) => d.name === name) ?? null;
148
+ }
149
+
150
+ /** Read-only view of every registered downloader. */
151
+ export function listDownloaders(): readonly Downloader[] {
152
+ return REGISTRY;
153
+ }
154
+
155
+ /**
156
+ * Collect every `LoginEntry` declared by a downloader, deduped by URL
157
+ * within each kind. Used by `membot login` to render one button per
158
+ * service (browser-login) and one set of instructions per service
159
+ * (api-key) even when multiple downloaders share the same setup
160
+ * (e.g. Google Docs / Sheets / Slides all share Google sign-in).
161
+ */
162
+ export function collectLoginEntries(): { browser: BrowserLoginEntry[]; apiKey: ApiKeyLoginEntry[] } {
163
+ const browser = new Map<string, BrowserLoginEntry>();
164
+ const apiKey = new Map<string, ApiKeyLoginEntry>();
165
+ for (const d of REGISTRY) {
166
+ if (!d.logins) continue;
167
+ for (const entry of d.logins) {
168
+ if (entry.kind === "browser") {
169
+ if (!browser.has(entry.url)) browser.set(entry.url, entry);
170
+ } else {
171
+ if (!apiKey.has(entry.url)) apiKey.set(entry.url, entry);
172
+ }
173
+ }
174
+ }
175
+ return { browser: [...browser.values()], apiKey: [...apiKey.values()] };
176
+ }
177
+
178
+ /**
179
+ * Compute a stable sha256 hex digest of the bytes. Re-exposed here
180
+ * because every downloader uses it.
181
+ */
182
+ export { sha256Hex } from "../local-reader.ts";
@@ -0,0 +1,291 @@
1
+ import { HelpfulError } from "../../errors.ts";
2
+ import { sha256Hex } from "../local-reader.ts";
3
+ import type { DownloadedRemote, Downloader } from "./index.ts";
4
+
5
+ const ISSUE_PATH = /^\/([^/]+)\/issue\/([A-Z]+-\d+)(?:$|\/|#|\?)/;
6
+ const PROJECT_PATH = /^\/([^/]+)\/project\/([^/?#]+)/;
7
+
8
+ const GRAPHQL_ENDPOINT = "https://api.linear.app/graphql";
9
+
10
+ /**
11
+ * Linear's web app uses a sophisticated cookie + signed-request scheme
12
+ * (`client-api.linear.app/graphql` with `useraccount`/`linear-client-id`
13
+ * headers) that's not realistically replayable from outside a real
14
+ * Linear browser session. Instead we use Linear's official API at
15
+ * `api.linear.app/graphql` with a personal API key — set up once via
16
+ * `membot config set downloaders.linear.api_key <KEY>` after creating
17
+ * the key at https://linear.app/settings/api.
18
+ *
19
+ * The API gives us the structured issue/project payload (title, body,
20
+ * comments, status, …) directly; we render it to markdown
21
+ * deterministically rather than scraping the rendered DOM.
22
+ */
23
+ export const linearDownloader: Downloader = {
24
+ name: "linear",
25
+ description: "Linear (linear.app/<workspace>/issue/<KEY> and /project/<slug>) — uses the Linear API.",
26
+ logins: [
27
+ {
28
+ kind: "api_key",
29
+ name: "Linear",
30
+ url: "https://linear.app/settings/api",
31
+ setupCommand: "membot config set downloaders.linear.api_key <KEY>",
32
+ description: "create a personal API key, then run the command on the right",
33
+ },
34
+ ],
35
+ requiresApiKey: true,
36
+ matches(url) {
37
+ return url.hostname === "linear.app" && (ISSUE_PATH.test(url.pathname) || PROJECT_PATH.test(url.pathname));
38
+ },
39
+ async download(url, ctx): Promise<DownloadedRemote> {
40
+ const apiKey = ctx.config.downloaders.linear.api_key.trim();
41
+ if (apiKey === "") {
42
+ throw new HelpfulError({
43
+ kind: "auth_error",
44
+ message: `Linear API key not configured.`,
45
+ hint: "Create a personal API key at https://linear.app/settings/api, then run `membot config set downloaders.linear.api_key <KEY>`.",
46
+ });
47
+ }
48
+
49
+ const issueMatch = url.pathname.match(ISSUE_PATH);
50
+ const projectMatch = url.pathname.match(PROJECT_PATH);
51
+ let markdown: string;
52
+ let downloaderArgs: Record<string, unknown>;
53
+
54
+ if (issueMatch) {
55
+ const identifier = issueMatch[2] as string;
56
+ ctx.onProgress?.(`querying issue ${identifier}`);
57
+ const issue = await fetchIssue(identifier, apiKey, url);
58
+ markdown = renderIssue(issue);
59
+ downloaderArgs = { kind: "issue", workspace: issueMatch[1], identifier };
60
+ } else if (projectMatch) {
61
+ const slug = projectMatch[2] as string;
62
+ const slugId = extractProjectSlugId(slug);
63
+ ctx.onProgress?.(`querying project ${slugId}`);
64
+ const project = await fetchProject(slugId, apiKey, url);
65
+ markdown = renderProject(project);
66
+ downloaderArgs = { kind: "project", workspace: projectMatch[1], slug, slug_id: slugId };
67
+ } else {
68
+ throw new HelpfulError({
69
+ kind: "input_error",
70
+ message: `not a Linear issue/project URL: ${url.toString()}`,
71
+ hint: "Pass a URL like https://linear.app/<workspace>/issue/<KEY> or .../project/<slug>.",
72
+ });
73
+ }
74
+
75
+ const bytes = new TextEncoder().encode(markdown);
76
+ return {
77
+ bytes,
78
+ sha256: sha256Hex(bytes),
79
+ mimeType: "text/markdown",
80
+ downloader: "linear",
81
+ downloaderArgs,
82
+ sourceUrl: url.toString(),
83
+ };
84
+ },
85
+ };
86
+
87
+ interface LinearUser {
88
+ name?: string | null;
89
+ displayName?: string | null;
90
+ email?: string | null;
91
+ }
92
+
93
+ interface LinearComment {
94
+ body: string | null;
95
+ createdAt: string | null;
96
+ user: LinearUser | null;
97
+ }
98
+
99
+ interface LinearIssue {
100
+ identifier: string;
101
+ url: string;
102
+ title: string;
103
+ description: string | null;
104
+ priorityLabel: string | null;
105
+ state: { name: string } | null;
106
+ assignee: LinearUser | null;
107
+ creator: LinearUser | null;
108
+ createdAt: string;
109
+ updatedAt: string;
110
+ comments: { nodes: LinearComment[] };
111
+ }
112
+
113
+ interface LinearProject {
114
+ id: string;
115
+ url: string;
116
+ name: string;
117
+ slugId: string;
118
+ description: string | null;
119
+ content: string | null;
120
+ state: string | null;
121
+ startDate: string | null;
122
+ targetDate: string | null;
123
+ createdAt: string;
124
+ updatedAt: string;
125
+ lead: LinearUser | null;
126
+ members: { nodes: LinearUser[] };
127
+ }
128
+
129
+ const ISSUE_QUERY = `query Issue($id: String!) {
130
+ issue(id: $id) {
131
+ identifier url title description priorityLabel
132
+ state { name }
133
+ assignee { name displayName email }
134
+ creator { name displayName email }
135
+ createdAt updatedAt
136
+ comments(first: 100) {
137
+ nodes { body createdAt user { name displayName email } }
138
+ }
139
+ }
140
+ }`;
141
+
142
+ const PROJECT_QUERY = `query ProjectBySlug($slugId: String!) {
143
+ projects(filter: { slugId: { eq: $slugId } }, first: 1) {
144
+ nodes {
145
+ id url name slugId description content state startDate targetDate createdAt updatedAt
146
+ lead { name displayName email }
147
+ members(first: 50) { nodes { name displayName email } }
148
+ }
149
+ }
150
+ }`;
151
+
152
+ async function fetchIssue(identifier: string, apiKey: string, url: URL): Promise<LinearIssue> {
153
+ const result = await graphql<{ issue: LinearIssue | null }>(apiKey, ISSUE_QUERY, { id: identifier }, url);
154
+ if (!result.issue) {
155
+ throw new HelpfulError({
156
+ kind: "not_found",
157
+ message: `Linear has no issue ${identifier} visible to this API key.`,
158
+ hint: "Verify the URL exists and that the API key belongs to a member of the issue's workspace.",
159
+ });
160
+ }
161
+ return result.issue;
162
+ }
163
+
164
+ async function fetchProject(slugId: string, apiKey: string, url: URL): Promise<LinearProject> {
165
+ const result = await graphql<{ projects: { nodes: LinearProject[] } }>(apiKey, PROJECT_QUERY, { slugId }, url);
166
+ const project = result.projects.nodes[0];
167
+ if (!project) {
168
+ throw new HelpfulError({
169
+ kind: "not_found",
170
+ message: `Linear has no project with slug ${slugId} visible to this API key.`,
171
+ hint: "Verify the URL exists and that the API key belongs to a member of the project's workspace.",
172
+ });
173
+ }
174
+ return project;
175
+ }
176
+
177
+ /**
178
+ * The trailing token on a Linear project URL is `<name>-<slugId>`,
179
+ * where `slugId` is a 12-char hex suffix. Linear's API matches by
180
+ * `slugId` exactly, so we slice the suffix off here.
181
+ */
182
+ function extractProjectSlugId(slug: string): string {
183
+ const match = slug.match(/-([0-9a-f]{8,})$/i);
184
+ return match ? (match[1] as string) : slug;
185
+ }
186
+
187
+ async function graphql<T>(apiKey: string, query: string, variables: Record<string, unknown>, url: URL): Promise<T> {
188
+ const response = await fetch(GRAPHQL_ENDPOINT, {
189
+ method: "POST",
190
+ headers: {
191
+ "Content-Type": "application/json",
192
+ Authorization: apiKey,
193
+ },
194
+ body: JSON.stringify({ query, variables }),
195
+ });
196
+ if (!response.ok) {
197
+ throw new HelpfulError({
198
+ kind: response.status === 401 || response.status === 403 ? "auth_error" : "network_error",
199
+ message: `Linear GraphQL returned ${response.status} ${response.statusText} for ${url.toString()}.`,
200
+ hint:
201
+ response.status === 401 || response.status === 403
202
+ ? "Re-create the API key at https://linear.app/settings/api and run `membot config set downloaders.linear.api_key <KEY>`."
203
+ : "Check that the URL is reachable and that the API key has access to the issue/project.",
204
+ });
205
+ }
206
+ const json = (await response.json()) as { data?: T; errors?: Array<{ message: string }> };
207
+ if (json.errors && json.errors.length > 0) {
208
+ const detail = json.errors.map((e) => e.message).join("; ");
209
+ throw new HelpfulError({
210
+ kind: "input_error",
211
+ message: `Linear GraphQL errors for ${url.toString()}: ${detail}`,
212
+ hint: "Verify the URL is correct and the API key has visibility into the workspace.",
213
+ });
214
+ }
215
+ if (!json.data) {
216
+ throw new HelpfulError({
217
+ kind: "internal_error",
218
+ message: `Linear GraphQL returned no data for ${url.toString()}.`,
219
+ hint: "Re-run with `--verbose` and report the response shape.",
220
+ });
221
+ }
222
+ return json.data;
223
+ }
224
+
225
+ function renderIssue(issue: LinearIssue): string {
226
+ const lines: string[] = [];
227
+ lines.push(`# ${issue.identifier}: ${issue.title}`);
228
+ lines.push("");
229
+ lines.push(`- URL: ${issue.url}`);
230
+ if (issue.state) lines.push(`- Status: ${issue.state.name}`);
231
+ if (issue.priorityLabel) lines.push(`- Priority: ${issue.priorityLabel}`);
232
+ if (issue.assignee) lines.push(`- Assignee: ${userLabel(issue.assignee)}`);
233
+ if (issue.creator) lines.push(`- Author: ${userLabel(issue.creator)}`);
234
+ lines.push(`- Created: ${issue.createdAt}`);
235
+ lines.push(`- Updated: ${issue.updatedAt}`);
236
+ lines.push("");
237
+ if (issue.description) {
238
+ lines.push("## Description");
239
+ lines.push("");
240
+ lines.push(issue.description.trim());
241
+ lines.push("");
242
+ }
243
+ const comments = issue.comments.nodes;
244
+ if (comments.length > 0) {
245
+ lines.push(`## Comments (${comments.length})`);
246
+ lines.push("");
247
+ for (const c of comments) {
248
+ const who = c.user ? userLabel(c.user) : "(unknown)";
249
+ lines.push(`### ${who} — ${c.createdAt ?? ""}`);
250
+ lines.push("");
251
+ lines.push((c.body ?? "").trim());
252
+ lines.push("");
253
+ }
254
+ }
255
+ return lines.join("\n").trim();
256
+ }
257
+
258
+ function renderProject(project: LinearProject): string {
259
+ const lines: string[] = [];
260
+ lines.push(`# ${project.name}`);
261
+ lines.push("");
262
+ lines.push(`- URL: ${project.url}`);
263
+ if (project.state) lines.push(`- State: ${project.state}`);
264
+ if (project.startDate) lines.push(`- Start: ${project.startDate}`);
265
+ if (project.targetDate) lines.push(`- Target: ${project.targetDate}`);
266
+ if (project.lead) lines.push(`- Lead: ${userLabel(project.lead)}`);
267
+ const members = project.members.nodes;
268
+ if (members.length > 0) lines.push(`- Members: ${members.map(userLabel).join(", ")}`);
269
+ lines.push(`- Created: ${project.createdAt}`);
270
+ lines.push(`- Updated: ${project.updatedAt}`);
271
+ lines.push("");
272
+ if (project.description) {
273
+ lines.push("## Summary");
274
+ lines.push("");
275
+ lines.push(project.description.trim());
276
+ lines.push("");
277
+ }
278
+ if (project.content) {
279
+ lines.push("## Overview");
280
+ lines.push("");
281
+ lines.push(project.content.trim());
282
+ lines.push("");
283
+ }
284
+ return lines.join("\n").trim();
285
+ }
286
+
287
+ function userLabel(user: LinearUser): string {
288
+ const name = user.displayName ?? user.name ?? "(unknown)";
289
+ if (user.email) return `${name} <${user.email}>`;
290
+ return name;
291
+ }