jobjourney-claude-plugin 3.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (133) hide show
  1. package/.claude-plugin/plugin.json +11 -0
  2. package/.mcp.json +11 -0
  3. package/LICENSE +21 -0
  4. package/README.md +333 -0
  5. package/dist/agent/heartbeat.d.ts +11 -0
  6. package/dist/agent/heartbeat.js +31 -0
  7. package/dist/agent/index.d.ts +1 -0
  8. package/dist/agent/index.js +43 -0
  9. package/dist/agent/process.d.ts +1 -0
  10. package/dist/agent/process.js +21 -0
  11. package/dist/agent/scheduler.d.ts +20 -0
  12. package/dist/agent/scheduler.js +106 -0
  13. package/dist/api.d.ts +2 -0
  14. package/dist/api.js +18 -0
  15. package/dist/config/paths.d.ts +6 -0
  16. package/dist/config/paths.js +10 -0
  17. package/dist/constants.d.ts +11 -0
  18. package/dist/constants.js +27 -0
  19. package/dist/discovery/analysis/description-analysis.d.ts +7 -0
  20. package/dist/discovery/analysis/description-analysis.js +306 -0
  21. package/dist/discovery/analysis/enrichment.d.ts +6 -0
  22. package/dist/discovery/analysis/enrichment.js +205 -0
  23. package/dist/discovery/analysis/pr-detection.d.ts +2 -0
  24. package/dist/discovery/analysis/pr-detection.js +152 -0
  25. package/dist/discovery/analysis/types.d.ts +41 -0
  26. package/dist/discovery/analysis/types.js +1 -0
  27. package/dist/discovery/ats/detector.d.ts +22 -0
  28. package/dist/discovery/ats/detector.js +114 -0
  29. package/dist/discovery/ats/greenhouse.d.ts +29 -0
  30. package/dist/discovery/ats/greenhouse.js +80 -0
  31. package/dist/discovery/ats/lever.d.ts +32 -0
  32. package/dist/discovery/ats/lever.js +128 -0
  33. package/dist/discovery/ats/registry.d.ts +9 -0
  34. package/dist/discovery/ats/registry.js +15 -0
  35. package/dist/discovery/core/normalize.d.ts +2 -0
  36. package/dist/discovery/core/normalize.js +9 -0
  37. package/dist/discovery/core/run-discovery.d.ts +18 -0
  38. package/dist/discovery/core/run-discovery.js +253 -0
  39. package/dist/discovery/core/types.d.ts +67 -0
  40. package/dist/discovery/core/types.js +36 -0
  41. package/dist/discovery/fallback/company-site.d.ts +59 -0
  42. package/dist/discovery/fallback/company-site.js +273 -0
  43. package/dist/discovery/parity/cases.d.ts +2 -0
  44. package/dist/discovery/parity/cases.js +89 -0
  45. package/dist/discovery/parity/cli.d.ts +1 -0
  46. package/dist/discovery/parity/cli.js +12 -0
  47. package/dist/discovery/parity/live-smoke-cli.d.ts +1 -0
  48. package/dist/discovery/parity/live-smoke-cli.js +9 -0
  49. package/dist/discovery/parity/live-smoke.d.ts +60 -0
  50. package/dist/discovery/parity/live-smoke.js +293 -0
  51. package/dist/discovery/parity/python-reference.d.ts +2 -0
  52. package/dist/discovery/parity/python-reference.js +54 -0
  53. package/dist/discovery/parity/run-parity.d.ts +3 -0
  54. package/dist/discovery/parity/run-parity.js +106 -0
  55. package/dist/discovery/parity/ts-reference.d.ts +2 -0
  56. package/dist/discovery/parity/ts-reference.js +25 -0
  57. package/dist/discovery/parity/types.d.ts +55 -0
  58. package/dist/discovery/parity/types.js +1 -0
  59. package/dist/discovery/sources/base.d.ts +11 -0
  60. package/dist/discovery/sources/base.js +1 -0
  61. package/dist/discovery/sources/indeed-browser.d.ts +6 -0
  62. package/dist/discovery/sources/indeed-browser.js +6 -0
  63. package/dist/discovery/sources/jora-browser.d.ts +6 -0
  64. package/dist/discovery/sources/jora-browser.js +6 -0
  65. package/dist/discovery/sources/linkedin-guest.d.ts +34 -0
  66. package/dist/discovery/sources/linkedin-guest.js +412 -0
  67. package/dist/discovery/sources/registry.d.ts +13 -0
  68. package/dist/discovery/sources/registry.js +21 -0
  69. package/dist/discovery/sources/seek-browser.d.ts +7 -0
  70. package/dist/discovery/sources/seek-browser.js +35 -0
  71. package/dist/discovery/storage/discovery-jobs-repo.d.ts +13 -0
  72. package/dist/discovery/storage/discovery-jobs-repo.js +51 -0
  73. package/dist/discovery/utils/http.d.ts +38 -0
  74. package/dist/discovery/utils/http.js +84 -0
  75. package/dist/discovery/utils/rate-limit.d.ts +27 -0
  76. package/dist/discovery/utils/rate-limit.js +44 -0
  77. package/dist/index.d.ts +2 -0
  78. package/dist/index.js +65 -0
  79. package/dist/scraper/core/browser.d.ts +6 -0
  80. package/dist/scraper/core/browser.js +79 -0
  81. package/dist/scraper/core/markdown.d.ts +2 -0
  82. package/dist/scraper/core/markdown.js +35 -0
  83. package/dist/scraper/core/run-scrape.d.ts +15 -0
  84. package/dist/scraper/core/run-scrape.js +106 -0
  85. package/dist/scraper/core/types.d.ts +31 -0
  86. package/dist/scraper/core/types.js +1 -0
  87. package/dist/scraper/sources/linkedin.d.ts +6 -0
  88. package/dist/scraper/sources/linkedin.js +376 -0
  89. package/dist/scraper/sources/seek.d.ts +6 -0
  90. package/dist/scraper/sources/seek.js +266 -0
  91. package/dist/storage/sqlite/db.d.ts +2 -0
  92. package/dist/storage/sqlite/db.js +11 -0
  93. package/dist/storage/sqlite/jobs-repo.d.ts +94 -0
  94. package/dist/storage/sqlite/jobs-repo.js +136 -0
  95. package/dist/storage/sqlite/migrations.d.ts +2 -0
  96. package/dist/storage/sqlite/migrations.js +122 -0
  97. package/dist/storage/sqlite/schedules-repo.d.ts +33 -0
  98. package/dist/storage/sqlite/schedules-repo.js +64 -0
  99. package/dist/storage/sqlite/scrape-runs-repo.d.ts +35 -0
  100. package/dist/storage/sqlite/scrape-runs-repo.js +67 -0
  101. package/dist/tools/ai.d.ts +3 -0
  102. package/dist/tools/ai.js +189 -0
  103. package/dist/tools/analytics.d.ts +3 -0
  104. package/dist/tools/analytics.js +36 -0
  105. package/dist/tools/chatbot.d.ts +3 -0
  106. package/dist/tools/chatbot.js +29 -0
  107. package/dist/tools/coffee-chat.d.ts +3 -0
  108. package/dist/tools/coffee-chat.js +231 -0
  109. package/dist/tools/comments.d.ts +3 -0
  110. package/dist/tools/comments.js +96 -0
  111. package/dist/tools/cv.d.ts +3 -0
  112. package/dist/tools/cv.js +63 -0
  113. package/dist/tools/dashboard.d.ts +3 -0
  114. package/dist/tools/dashboard.js +31 -0
  115. package/dist/tools/documents.d.ts +3 -0
  116. package/dist/tools/documents.js +97 -0
  117. package/dist/tools/jobs.d.ts +3 -0
  118. package/dist/tools/jobs.js +276 -0
  119. package/dist/tools/local-scraping.d.ts +32 -0
  120. package/dist/tools/local-scraping.js +454 -0
  121. package/dist/tools/notifications.d.ts +3 -0
  122. package/dist/tools/notifications.js +72 -0
  123. package/dist/tools/profile.d.ts +3 -0
  124. package/dist/tools/profile.js +246 -0
  125. package/dist/tools/scraping.d.ts +3 -0
  126. package/dist/tools/scraping.js +35 -0
  127. package/dist/tools/subscription.d.ts +3 -0
  128. package/dist/tools/subscription.js +76 -0
  129. package/dist/types.d.ts +3 -0
  130. package/dist/types.js +1 -0
  131. package/dist/version.d.ts +2 -0
  132. package/dist/version.js +2 -0
  133. package/package.json +61 -0
@@ -0,0 +1,11 @@
1
+ {
2
+ "name": "jobjourney",
3
+ "description": "Track job applications, get AI resume evaluations, generate cover letters, prep for interviews, and manage coffee chat networking — all through Claude Code.",
4
+ "version": "3.1.0",
5
+ "author": "JobJourney",
6
+ "homepage": "https://jobjourney.me",
7
+ "repository": "https://github.com/Rorogogogo/jobjourney-claude-plugin",
8
+ "mcp": {
9
+ "config_path": "../.mcp.json"
10
+ }
11
+ }
package/.mcp.json ADDED
@@ -0,0 +1,11 @@
1
+ {
2
+ "mcpServers": {
3
+ "jobjourney": {
4
+ "type": "http",
5
+ "url": "https://server.jobjourney.me/mcp",
6
+ "headers": {
7
+ "X-API-Key": "${JOBJOURNEY_API_KEY}"
8
+ }
9
+ }
10
+ }
11
+ }
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 JobJourney
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,333 @@
1
+ # 🚀 JobJourney Claude Plugin
2
+
3
+ > A production-ready MCP server for JobJourney with AI job-search tools, local job discovery, and scheduled scraping from Claude.
4
+
5
+ [![npm version](https://img.shields.io/npm/v/jobjourney-claude-plugin)](https://www.npmjs.com/package/jobjourney-claude-plugin)
6
+ ![License](https://img.shields.io/badge/license-MIT-blue.svg)
7
+ ![Node](https://img.shields.io/badge/node-%3E%3D18-brightgreen.svg)
8
+ ![Protocol](https://img.shields.io/badge/protocol-MCP-7c3aed)
9
+ [![GitHub stars](https://img.shields.io/github/stars/Rorogogogo/jobjourney-claude-plugin?style=social)](https://github.com/Rorogogogo/jobjourney-claude-plugin)
10
+
11
+ ---
12
+
13
+ ## ✨ What It Does
14
+
15
+ - 🤖 **AI job-search workflows** for resume fit scoring, cover letters, CV generation, interview prep, and career chat
16
+ - 🗂️ **Application tracking** with saved jobs, notes, status changes, starring, search, and dashboard analytics
17
+ - 🔍 **Local job discovery** with a canonical discovery engine that stores results in local SQLite
18
+ - 🌐 **Mixed scraping strategy**: LinkedIn uses direct HTTP guest scraping, while blocked sites like SEEK use Playwright
19
+ - 🏢 **ATS expansion** for supported providers like Greenhouse and Lever after discovery
20
+ - ⏰ **Scheduled discovery** through the background agent and MCP tools
21
+ - 💾 **Local storage** for jobs, runs, schedules, and discovery reports in `~/.jobjourney/jobs.db`
22
+
23
+ ---
24
+
25
+ ## 📸 Demo
26
+
27
+ Use it naturally from Claude:
28
+
29
+ > "Use `discover_jobs` with keyword `full stack`, location `Sydney`, sources `linkedin` and `seek`, pages `1`."
30
+
31
+ > "Use `search_jobs` and show me the latest LinkedIn roles in Sydney."
32
+
33
+ > "Use `schedule_discovery` to run every day at 9am for backend jobs in Melbourne."
34
+
35
+ > "Evaluate how well my resume matches this job and draft a cover letter."
36
+
37
+ If you want product screenshots or GIFs later, this is the right place to add them.
38
+
39
+ ---
40
+
41
+ ## 🧭 Remote Vs Local
42
+
43
+ This project has two distinct usage modes.
44
+
45
+ | Mode | Best for | Includes |
46
+ |---|---|---|
47
+ | **Remote MCP** | Fastest setup, cloud-backed JobJourney tools | Job tracking, AI tools, documents, networking, profile, analytics |
48
+ | **Local plugin / stdio** | Full local scraping and scheduled discovery | Everything above, plus `discover_jobs`, `search_jobs`, `schedule_discovery`, `login_jobsite`, local SQLite |
49
+
50
+ Important:
51
+
52
+ - **Local scraping requires the local plugin**, not just the hosted MCP endpoint.
53
+ - **LinkedIn discovery** uses direct HTTP guest scraping.
54
+ - **SEEK discovery** uses Playwright and local browser session support.
55
+
56
+ ---
57
+
58
+ ## 📦 Installation
59
+
60
+ ### Option A: Remote MCP
61
+
62
+ Use this if you want the hosted JobJourney tools with the least setup.
63
+
64
+ ```bash
65
+ claude mcp add jobjourney -t http -H "X-API-Key: jj_your_api_key_here" https://server.jobjourney.me/mcp
66
+ ```
67
+
68
+ Or add it manually to `~/.claude.json`:
69
+
70
+ ```json
71
+ {
72
+ "mcpServers": {
73
+ "jobjourney": {
74
+ "type": "url",
75
+ "url": "https://server.jobjourney.me/mcp",
76
+ "headers": {
77
+ "X-API-Key": "jj_your_api_key_here"
78
+ }
79
+ }
80
+ }
81
+ }
82
+ ```
83
+
84
+ ### Option B: Local stdio plugin
85
+
86
+ Use this if you want local discovery, Playwright-backed scraping, scheduling, and SQLite storage.
87
+
88
+ ```bash
89
+ claude mcp add jobjourney \
90
+ -e JOBJOURNEY_API_URL=https://server.jobjourney.me \
91
+ -e JOBJOURNEY_API_KEY=jj_your_api_key_here \
92
+ -e TRANSPORT=stdio \
93
+ -- npx -y jobjourney-claude-plugin
94
+ ```
95
+
96
+ If you prefer Claude Desktop config:
97
+
98
+ ```json
99
+ {
100
+ "mcpServers": {
101
+ "jobjourney": {
102
+ "command": "npx",
103
+ "args": ["-y", "jobjourney-claude-plugin"],
104
+ "env": {
105
+ "JOBJOURNEY_API_URL": "https://server.jobjourney.me",
106
+ "JOBJOURNEY_API_KEY": "jj_your_api_key_here",
107
+ "TRANSPORT": "stdio"
108
+ }
109
+ }
110
+ }
111
+ }
112
+ ```
113
+
114
+ ### Playwright prerequisite
115
+
116
+ For local browser-backed sources like SEEK, install a browser once:
117
+
118
+ ```bash
119
+ npx playwright install chromium
120
+ ```
121
+
122
+ ---
123
+
124
+ ## 🚀 Quick Start
125
+
126
+ ### 1. Connect the plugin
127
+
128
+ ```bash
129
+ claude mcp add jobjourney \
130
+ -e JOBJOURNEY_API_URL=https://server.jobjourney.me \
131
+ -e JOBJOURNEY_API_KEY=jj_your_api_key_here \
132
+ -e TRANSPORT=stdio \
133
+ -- npx -y jobjourney-claude-plugin
134
+ ```
135
+
136
+ ### 2. Log in to browser-backed sites when needed
137
+
138
+ From Claude:
139
+
140
+ ```text
141
+ Use login_jobsite with site "seek"
142
+ ```
143
+
144
+ ### 3. Run discovery
145
+
146
+ From Claude:
147
+
148
+ ```text
149
+ Use discover_jobs with keyword "full stack", location "Sydney", sources ["linkedin", "seek"], pages 1
150
+ ```
151
+
152
+ ### 4. Query the stored results
153
+
154
+ ```text
155
+ Use search_jobs with source "linkedin" and limit 5
156
+ ```
157
+
158
+ ### 5. Schedule it
159
+
160
+ ```text
161
+ Use schedule_discovery with keyword "full stack", location "Sydney", time "09:00", sources ["linkedin", "seek"]
162
+ ```
163
+
164
+ ---
165
+
166
+ ## 🔍 Source Support
167
+
168
+ | Source | Status | Transport | Notes |
169
+ |---|---|---|---|
170
+ | `linkedin` | Active | HTTP guest scraping | Primary supported LinkedIn path |
171
+ | `seek` | Active | Playwright | Local browser session support |
172
+ | `indeed` | Planned | Playwright | Not implemented yet |
173
+ | `jora` | Planned | Playwright | Not implemented yet |
174
+
175
+ | ATS | Support |
176
+ |---|---|
177
+ | `greenhouse` | Detect + expand |
178
+ | `lever` | Detect + expand |
179
+ | `workday` | Detect only |
180
+ | `smartrecruiters` | Detect only |
181
+ | `ashby` | Detect only |
182
+
183
+ ---
184
+
185
+ ## 🧠 How Local Discovery Works
186
+
187
+ The local discovery engine lives under `src/discovery` and uses one canonical job model across all sources.
188
+
189
+ ### LinkedIn
190
+
191
+ 1. Fetch guest search results
192
+ 2. Fetch guest job detail HTML for each posting
193
+ 3. Extract description, metadata, and external apply URL
194
+ 4. Detect ATS from the external URL
195
+ 5. Expand supported ATS companies
196
+
197
+ ### SEEK
198
+
199
+ 1. Launch Playwright
200
+ 2. Use the browser-backed source flow
201
+ 3. Normalize results into the same canonical job schema
202
+
203
+ ### Storage
204
+
205
+ Local runs are stored in:
206
+
207
+ - jobs DB: `~/.jobjourney/jobs.db`
208
+ - agent heartbeat: `~/.jobjourney/agent-heartbeat.json`
209
+
210
+ The database stores:
211
+
212
+ - discovered jobs
213
+ - scrape/discovery runs
214
+ - schedules
215
+
216
+ ---
217
+
218
+ ## 🛠 Key Tools
219
+
220
+ This MCP exposes a broad JobJourney toolset. For local discovery, these are the most important ones:
221
+
222
+ | Tool | What it does |
223
+ |---|---|
224
+ | `discover_jobs` | Run the canonical multi-source discovery engine and store results locally |
225
+ | `search_jobs` | Query jobs already stored in local SQLite |
226
+ | `schedule_discovery` | Schedule recurring local discovery runs |
227
+ | `get_latest_discovery_report` | Show the latest discovery batch summary |
228
+ | `scrape_jobs` | Legacy one-off local scrape path |
229
+ | `login_jobsite` | Save browser login state for supported sites |
230
+ | `check_login_status` | Confirm browser login state |
231
+
232
+ And the broader platform also includes:
233
+
234
+ - job tracking
235
+ - AI fit evaluation
236
+ - cover letter and CV generation
237
+ - mock interviews
238
+ - dashboard analytics
239
+ - coffee chat networking
240
+ - profile and document management
241
+
242
+ ---
243
+
244
+ ## 🏗 Architecture
245
+
246
+ ```text
247
+ src/
248
+ index.ts # FastMCP server entrypoint
249
+ tools/ # MCP tool registration
250
+ discovery/ # Canonical local discovery engine
251
+ core/ # orchestration and job types
252
+ sources/ # linkedin guest, seek browser, planned sources
253
+ ats/ # ATS detection and supported crawlers
254
+ analysis/ # salary, tech stack, PR, experience enrichment
255
+ fallback/ # optional company career-page probing
256
+ storage/ # discovery persistence adapters
257
+ parity/ # TS vs Python parity harness
258
+ scraper/ # legacy browser scraper layer, being phased down
259
+ storage/sqlite/ # SQLite repos and migrations
260
+ agent/ # background scheduling agent
261
+ config/ # path and runtime config
262
+ ```
263
+
264
+ Built with FastMCP, TypeScript, Zod, Playwright, and SQLite.
265
+
266
+ ---
267
+
268
+ ## ⚙️ Environment Variables
269
+
270
+ | Variable | Description | Default |
271
+ |---|---|---|
272
+ | `JOBJOURNEY_API_URL` | JobJourney backend base URL | `https://server.jobjourney.me` |
273
+ | `JOBJOURNEY_API_KEY` | API key for backend-authenticated features | - |
274
+ | `TRANSPORT` | MCP transport: `stdio` or `httpStream` | `stdio` |
275
+ | `PORT` | HTTP port when `TRANSPORT=httpStream` | `8080` |
276
+
277
+ ---
278
+
279
+ ## 🧪 Development
280
+
281
+ ```bash
282
+ git clone https://github.com/Rorogogogo/jobjourney-claude-plugin.git
283
+ cd jobjourney-claude-plugin
284
+ npm install
285
+ npx playwright install chromium
286
+ npm run build
287
+ npm test
288
+ npm run typecheck
289
+ ```
290
+
291
+ Useful local commands:
292
+
293
+ ```bash
294
+ npm run start
295
+ npm run agent
296
+ npm run parity:discovery
297
+ npm run parity:live-smoke
298
+ ```
299
+
300
+ ---
301
+
302
+ ## 🤝 Contributing
303
+
304
+ Contributions are welcome. Useful contribution areas right now:
305
+
306
+ - tightening the canonical `src/discovery` architecture
307
+ - implementing `indeed` and `jora`
308
+ - improving live parity coverage
309
+ - reducing remaining legacy surface in `src/scraper`
310
+
311
+ Standard flow:
312
+
313
+ ```bash
314
+ git checkout -b feature/my-change
315
+ npm test
316
+ npm run typecheck
317
+ git commit -m "feat: my change"
318
+ ```
319
+
320
+ ---
321
+
322
+ ## 🔗 Links
323
+
324
+ - [Website](https://jobjourney.me)
325
+ - [npm package](https://www.npmjs.com/package/jobjourney-claude-plugin)
326
+ - [GitHub repository](https://github.com/Rorogogogo/jobjourney-claude-plugin)
327
+ - [Issues](https://github.com/Rorogogogo/jobjourney-claude-plugin/issues)
328
+
329
+ ---
330
+
331
+ ## 📄 License
332
+
333
+ [MIT](LICENSE) © JobJourney
@@ -0,0 +1,11 @@
/** Liveness snapshot written periodically by the background agent process. */
export interface HeartbeatData {
    pid: number;        // OS process id of the agent that wrote the file
    updatedAt: string;  // ISO-8601 timestamp of the last write
}
/** Write the heartbeat file for the current process (creating the data dir if needed). */
export declare function writeHeartbeat(homeDir?: string): void;
/** Read and parse the heartbeat file; returns null when missing or unreadable. */
export declare function readHeartbeat(homeDir?: string): HeartbeatData | null;
/** True when a heartbeat exists and is fresher than maxAgeMs (implementation defaults to 60 000 ms). */
export declare function isAgentHealthy(options?: {
    homeDir?: string;   // override of the user's home directory
    maxAgeMs?: number;  // staleness threshold in milliseconds
    now?: string;       // ISO timestamp to compare against (testing hook)
}): boolean;
@@ -0,0 +1,31 @@
import { readFileSync, writeFileSync, mkdirSync } from "node:fs";
import path from "node:path";
import { getJobJourneyPaths } from "../config/paths.js";

/**
 * Write the agent heartbeat file for the current process.
 * Creates the data directory if needed, then records the pid and an
 * ISO-8601 timestamp so other processes can tell the agent is alive.
 *
 * @param {string} [homeDir] optional home-directory override.
 */
export function writeHeartbeat(homeDir) {
    const paths = getJobJourneyPaths(homeDir);
    mkdirSync(path.dirname(paths.heartbeatPath), { recursive: true });
    const data = {
        pid: process.pid,
        updatedAt: new Date().toISOString(),
    };
    writeFileSync(paths.heartbeatPath, JSON.stringify(data, null, 2));
}

/**
 * Read and parse the heartbeat file.
 *
 * @param {string} [homeDir] optional home-directory override.
 * @returns {HeartbeatData|null} the parsed heartbeat, or null when the file
 *   is missing, unreadable, or does not have the expected shape.
 */
export function readHeartbeat(homeDir) {
    const paths = getJobJourneyPaths(homeDir);
    try {
        const parsed = JSON.parse(readFileSync(paths.heartbeatPath, "utf-8"));
        // FIX: validate the parsed shape instead of trusting JSON.parse blindly —
        // a truncated or hand-edited heartbeat file previously leaked garbage to callers.
        if (parsed !== null &&
            typeof parsed === "object" &&
            typeof parsed.pid === "number" &&
            typeof parsed.updatedAt === "string") {
            return parsed;
        }
        return null;
    }
    catch {
        return null;
    }
}

/**
 * Decide whether the agent is alive based on heartbeat freshness.
 *
 * @param {{homeDir?: string, maxAgeMs?: number, now?: string}} [options]
 *   maxAgeMs defaults to 60 000 ms; now is a testing hook (ISO timestamp).
 * @returns {boolean} true when a heartbeat exists and is fresher than maxAgeMs.
 */
export function isAgentHealthy(options) {
    const maxAge = options?.maxAgeMs ?? 60_000;
    const now = options?.now ? new Date(options.now).getTime() : Date.now();
    const heartbeat = readHeartbeat(options?.homeDir);
    if (!heartbeat)
        return false;
    const updatedAt = new Date(heartbeat.updatedAt).getTime();
    // NaN comparisons are false, so an unparseable timestamp reads as unhealthy.
    return now - updatedAt < maxAge;
}
@@ -0,0 +1 @@
// Empty export marks this declaration file as an ES module; the agent
// entrypoint runs for its side effects and exports nothing.
export {};
@@ -0,0 +1,43 @@
import { AgentScheduler } from "./scheduler.js";
import { writeHeartbeat } from "./heartbeat.js";

// How often the scheduler re-reads schedules from SQLite.
const RECONCILE_INTERVAL_MS = 30_000;
// How often the agent proves it is alive by rewriting the heartbeat file.
const HEARTBEAT_INTERVAL_MS = 15_000;

/**
 * Entry point of the long-running background agent: reconciles cron tasks
 * against the schedules table and keeps a heartbeat file fresh until killed.
 */
async function main() {
    console.log("[agent] starting jobjourney-agent");
    const homeDir = process.env.JOBJOURNEY_HOME;
    const scheduler = new AgentScheduler();

    // First pass runs immediately; an error here falls through to the fatal handler.
    scheduler.reconcile();
    writeHeartbeat(homeDir);

    // Later passes only log — a transient failure must not kill the agent.
    const reconcileSafely = () => {
        try {
            scheduler.reconcile();
        }
        catch (error) {
            console.error("[agent] reconciliation error:", error);
        }
    };
    setInterval(reconcileSafely, RECONCILE_INTERVAL_MS);

    const beatSafely = () => {
        try {
            writeHeartbeat(homeDir);
        }
        catch (error) {
            console.error("[agent] heartbeat error:", error);
        }
    };
    setInterval(beatSafely, HEARTBEAT_INTERVAL_MS);

    // Graceful shutdown: stop every cron task before exiting.
    const shutdown = () => {
        console.log("[agent] shutting down");
        scheduler.stop();
        process.exit(0);
    };
    process.on("SIGINT", shutdown);
    process.on("SIGTERM", shutdown);

    console.log("[agent] running, reconcile every", RECONCILE_INTERVAL_MS / 1000, "s");
}

main().catch((error) => {
    console.error("[agent] fatal:", error);
    process.exit(1);
});
@@ -0,0 +1 @@
/** Spawn the detached background agent unless a healthy one is already running; returns true when spawned. */
export declare function ensureAgentRunning(homeDir?: string): boolean;
@@ -0,0 +1,21 @@
import { spawn } from "node:child_process";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { isAgentHealthy } from "./heartbeat.js";

const __dirname = path.dirname(fileURLToPath(import.meta.url));

/**
 * Start the background agent as a detached child process unless the
 * heartbeat file shows a healthy agent is already alive.
 *
 * @param {string} [homeDir] optional home-directory override, forwarded to
 *   the child via the JOBJOURNEY_HOME environment variable.
 * @returns {boolean} true when a new agent was spawned, false when one was
 *   already running.
 */
export function ensureAgentRunning(homeDir) {
    if (isAgentHealthy({ homeDir })) {
        return false; // a live agent already owns the heartbeat
    }
    const childEnv = { ...process.env };
    if (homeDir) {
        childEnv.JOBJOURNEY_HOME = homeDir;
    }
    const agent = spawn(process.execPath, [path.resolve(__dirname, "index.js")], {
        detached: true,   // survive the parent exiting
        stdio: "ignore",  // no pipes to keep the parent alive
        env: childEnv,
    });
    agent.unref(); // let the parent's event loop exit independently
    return true;
}
@@ -0,0 +1,20 @@
import { runScrape } from "../scraper/core/run-scrape.js";
import { runDiscovery } from "../discovery/core/run-discovery.js";
/** Injectable implementations so tests can stub the heavy scrape/discovery calls. */
interface AgentSchedulerDeps {
    runScrape?: typeof runScrape;
    runDiscovery?: typeof runDiscovery;
}
/** Keeps node-cron tasks in sync with the SQLite schedules table and runs due jobs. */
export declare class AgentScheduler {
    private tasks;
    private dbPath?;
    private readonly runScrapeImpl;
    private readonly runDiscoveryImpl;
    constructor(dbPath?: string, deps?: AgentSchedulerDeps);
    private readonly discoveryLogger;
    /** Reconcile cron tasks with the current schedule rows (drop stale, add new). */
    reconcile(): void;
    private runScheduledJob;
    /** Test-only passthrough to the private runScheduledJob. */
    runScheduledJobForTest(id: number, keyword: string, location: string, source: string, runMode: string, sources: string | null): Promise<void>;
    /** Stop every cron task and clear the registry. */
    stop(): void;
    /** Number of currently registered cron tasks. */
    get activeCount(): number;
}
export {};
@@ -0,0 +1,106 @@
import cron from "node-cron";
import { openDatabase } from "../storage/sqlite/db.js";
import { SchedulesRepo } from "../storage/sqlite/schedules-repo.js";
import { runScrape } from "../scraper/core/run-scrape.js";
import { runDiscovery } from "../discovery/core/run-discovery.js";
import { DiscoveryJobsRepo } from "../discovery/storage/discovery-jobs-repo.js";
import { ScrapeRunsRepo } from "../storage/sqlite/scrape-runs-repo.js";

/**
 * Keeps node-cron tasks in sync with the schedules stored in SQLite and
 * executes each due schedule as either a discovery run or a legacy scrape run.
 * Every run is recorded in the scrape-runs table with success/error status.
 */
export class AgentScheduler {
    // scheduleId -> { scheduleId, task } for every live cron task
    tasks = new Map();
    dbPath;
    runScrapeImpl;
    runDiscoveryImpl;
    /**
     * @param {string} [dbPath] optional SQLite path override, forwarded to openDatabase.
     * @param {{runScrape?: Function, runDiscovery?: Function}} [deps]
     *   injectable implementations so tests can stub the heavy work.
     */
    constructor(dbPath, deps = {}) {
        this.dbPath = dbPath;
        this.runScrapeImpl = deps.runScrape ?? runScrape;
        this.runDiscoveryImpl = deps.runDiscovery ?? runDiscovery;
    }
    // Structured logger handed to the discovery engine.
    discoveryLogger = (payload) => {
        console.log(`[agent:discover] ${JSON.stringify(payload)}`);
    };
    /**
     * Bring cron tasks in line with the current schedule rows: stop tasks
     * whose schedule was removed or disabled, and register tasks for any
     * new schedule with a valid cron expression.
     */
    reconcile() {
        const db = openDatabase(this.dbPath);
        try {
            const repo = new SchedulesRepo(db);
            const schedules = repo.list(true);
            // Remove tasks for schedules that no longer exist or are disabled
            const activeIds = new Set(schedules.map((s) => s.id));
            for (const [id, entry] of this.tasks) {
                if (!activeIds.has(id)) {
                    entry.task.stop();
                    this.tasks.delete(id);
                }
            }
            // Add new schedules
            for (const schedule of schedules) {
                const s = schedule;
                if (!this.tasks.has(s.id) && cron.validate(s.cron)) {
                    const task = cron.schedule(s.cron, () => {
                        void this.runScheduledJob(s.id, s.keyword, s.location, s.source, s.run_mode, s.sources);
                    });
                    this.tasks.set(s.id, { scheduleId: s.id, task });
                }
            }
        }
        finally {
            db.close();
        }
    }
    /**
     * Execute one scheduled job, recording the run and its outcome.
     * "discover" runs go through the canonical discovery engine and persist
     * jobs locally; anything else takes the legacy scrape path.
     */
    async runScheduledJob(id, keyword, location, source, runMode, sources) {
        const db = openDatabase(this.dbPath);
        const runsRepo = new ScrapeRunsRepo(db);
        const run = runsRepo.createRun({
            scheduleId: id,
            keyword,
            location,
            source,
            runMode: runMode === "discover" ? "discover" : "scrape",
            sources: sources ?? undefined,
        });
        try {
            if (runMode === "discover") {
                const result = await this.runDiscoveryImpl({
                    keyword,
                    location,
                    // sources is stored as a comma-separated string; undefined means "all defaults"
                    sources: sources ? sources.split(",").map((value) => value.trim()).filter(Boolean) : undefined,
                    pages: 30,
                }, {
                    logger: this.discoveryLogger,
                });
                new DiscoveryJobsRepo(db).upsertJobs(result.jobs, {
                    keyword,
                    location,
                    runId: run.id,
                });
                runsRepo.finishRun(run.id, { status: "success", jobCount: result.jobs.length });
            }
            else {
                await this.runScrapeImpl({ keyword, location, source, dbPath: this.dbPath });
                runsRepo.finishRun(run.id, { status: "success" });
            }
            new SchedulesRepo(db).updateLastRunAt(id);
        }
        catch (error) {
            runsRepo.finishRun(run.id, {
                status: "error",
                error: error instanceof Error ? error.message : String(error),
            });
            // FIX: previously always logged "scrape failed" even when the failed
            // run was a discovery run, which made log triage misleading.
            const label = runMode === "discover" ? "discovery" : "scrape";
            console.error(`[agent] ${label} failed for schedule ${id}:`, error);
        }
        finally {
            db.close();
        }
    }
    /** Test-only passthrough to the private runScheduledJob. */
    async runScheduledJobForTest(id, keyword, location, source, runMode, sources) {
        return this.runScheduledJob(id, keyword, location, source, runMode, sources);
    }
    /** Stop every cron task and clear the registry. */
    stop() {
        for (const [, entry] of this.tasks) {
            entry.task.stop();
        }
        this.tasks.clear();
    }
    /** Number of currently registered cron tasks. */
    get activeCount() {
        return this.tasks.size;
    }
}
package/dist/api.d.ts ADDED
@@ -0,0 +1,2 @@
/** Backend base URL: JOBJOURNEY_API_URL, falling back to http://localhost:5014. */
export declare const API_BASE_URL: string;
/** Call a backend endpoint with JSON/auth headers; resolves with the parsed JSON body, throws on non-2xx. */
export declare function apiCall(endpoint: string, options?: RequestInit, apiKey?: string): Promise<unknown>;
package/dist/api.js ADDED
@@ -0,0 +1,18 @@
// Base URL of the JobJourney backend; overridable via JOBJOURNEY_API_URL.
export const API_BASE_URL = process.env.JOBJOURNEY_API_URL || "http://localhost:5014";

/**
 * Call a JobJourney backend endpoint and return the parsed JSON body.
 *
 * Default headers set Content-Type to application/json and, when apiKey is
 * given, X-API-Key; anything in options.headers takes precedence over both.
 *
 * @param {string} endpoint path appended to API_BASE_URL (e.g. "/jobs").
 * @param {RequestInit} [options] fetch options; headers are merged as above.
 * @param {string} [apiKey] optional API key for the X-API-Key header.
 * @returns {Promise<unknown>} the decoded JSON response body.
 * @throws {Error} "API error <status>: <body>" when the response is not ok.
 */
export async function apiCall(endpoint, options = {}, apiKey) {
    const headers = Object.assign(
        { "Content-Type": "application/json" },
        apiKey ? { "X-API-Key": apiKey } : undefined,
        options.headers,
    );
    const response = await fetch(`${API_BASE_URL}${endpoint}`, {
        ...options,
        headers,
    });
    if (!response.ok) {
        const errorText = await response.text();
        throw new Error(`API error ${response.status}: ${errorText}`);
    }
    return response.json();
}
@@ -0,0 +1,6 @@
/** Filesystem locations used for local storage under the user's home directory. */
export interface JobJourneyPaths {
    dataDir: string;        // <home>/.jobjourney
    dbPath: string;         // <home>/.jobjourney/jobs.db
    heartbeatPath: string;  // <home>/.jobjourney/agent-heartbeat.json
}
/** Resolve all plugin paths, optionally rooted at a custom home directory. */
export declare function getJobJourneyPaths(homeDir?: string): JobJourneyPaths;
@@ -0,0 +1,10 @@
import os from "node:os";
import path from "node:path";

/**
 * Resolve every filesystem location the plugin uses, rooted at
 * <homeDir>/.jobjourney.
 *
 * @param {string} [homeDir] home-directory override; defaults to os.homedir().
 * @returns {{dataDir: string, dbPath: string, heartbeatPath: string}}
 */
export function getJobJourneyPaths(homeDir = os.homedir()) {
    const dataDir = path.join(homeDir, ".jobjourney");
    const inDataDir = (fileName) => path.join(dataDir, fileName);
    return {
        dataDir,
        dbPath: inDataDir("jobs.db"),
        heartbeatPath: inDataDir("agent-heartbeat.json"),
    };
}
@@ -0,0 +1,11 @@
/** Numeric application-status codes as stored by the backend/DB. */
export declare const JOB_STATUS: {
    readonly EXPIRED: 0;
    readonly SAVED: 1;
    readonly APPLIED: 2;
    readonly INITIAL_INTERVIEW: 3;
    readonly FINAL_INTERVIEW: 4;
    readonly OFFERED: 5;
    readonly REJECTED: 6;
};
/** Status code -> human-readable label. */
export declare const STATUS_TEXT: Record<number, string>;
/** Label/alias -> status code (accepted keys are defined in constants.js — confirm there). */
export declare const STATUS_MAP: Record<string, number>;