calibrcv 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +196 -0
- package/bin/calibrcv.js +193 -0
- package/package.json +58 -0
- package/src/config.js +31 -0
- package/src/constants/hbs-verbs.js +15 -0
- package/src/constants/latex-template.js +114 -0
- package/src/lib/ai-router.js +125 -0
- package/src/lib/ats-scorer.js +479 -0
- package/src/lib/job-scraper.js +105 -0
- package/src/lib/latex-compiler.js +125 -0
- package/src/lib/latex-escape.js +58 -0
- package/src/lib/page-loop.js +99 -0
- package/src/lib/pdf-extractor.js +49 -0
- package/src/pipeline/orchestrator.js +197 -0
- package/src/prompts/analyze.js +48 -0
- package/src/prompts/latex.js +44 -0
- package/src/prompts/synthesize.js +75 -0
- package/src/prompts/tailor.js +33 -0
- package/src/prompts/trim.js +34 -0
- package/src/providers/gemini.js +35 -0
- package/src/providers/groq.js +34 -0
- package/src/providers/ollama.js +71 -0
- package/src/providers/openrouter.js +46 -0
- package/src/ui/interview.js +65 -0
- package/src/ui/report.js +73 -0
- package/src/ui/spinner.js +60 -0
|
@@ -0,0 +1,125 @@
|
|
|
1
|
+
import { execSync } from 'child_process';
import { randomUUID } from 'crypto';
import { writeFileSync, readFileSync, existsSync, mkdirSync, unlinkSync, rmSync } from 'fs';
import { tmpdir } from 'os';
import { join } from 'path';
import { PDFDocument } from 'pdf-lib';
import { validateLatex } from './latex-escape.js';
|
|
7
|
+
|
|
8
|
+
/**
|
|
9
|
+
* Compile LaTeX source to PDF.
|
|
10
|
+
* Tries: tectonic -> pdflatex -> latexmk+xelatex -> fallback (source only).
|
|
11
|
+
*
|
|
12
|
+
* @param {string} latexSource - Complete LaTeX document source.
|
|
13
|
+
* @returns {Promise<{ pdfBuffer: Buffer|null, pageCount: number, compilationMethod: string, latexSource: string, error?: string }>}
|
|
14
|
+
*/
|
|
15
|
+
export async function compileLaTeX(latexSource) {
|
|
16
|
+
const validation = validateLatex(latexSource);
|
|
17
|
+
if (!validation.valid) {
|
|
18
|
+
return {
|
|
19
|
+
pdfBuffer: null,
|
|
20
|
+
pageCount: 0,
|
|
21
|
+
compilationMethod: 'none',
|
|
22
|
+
latexSource,
|
|
23
|
+
error: `LaTeX validation failed: ${validation.errors.join('; ')}`,
|
|
24
|
+
};
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
const jobId = randomUUID().slice(0, 8);
|
|
28
|
+
const tmpDir = join('/tmp', `calibrcv-${jobId}`);
|
|
29
|
+
const env = { ...process.env, PATH: `${process.env.PATH}:/Library/TeX/texbin:/usr/local/bin` };
|
|
30
|
+
|
|
31
|
+
try {
|
|
32
|
+
if (!existsSync(tmpDir)) {
|
|
33
|
+
mkdirSync(tmpDir, { recursive: true });
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
const texPath = join(tmpDir, 'resume.tex');
|
|
37
|
+
const pdfPath = join(tmpDir, 'resume.pdf');
|
|
38
|
+
writeFileSync(texPath, latexSource, 'utf-8');
|
|
39
|
+
|
|
40
|
+
// Strategy 1: tectonic
|
|
41
|
+
try {
|
|
42
|
+
execSync(`tectonic "${texPath}" --outdir "${tmpDir}" 2>&1`, {
|
|
43
|
+
timeout: 30000, stdio: 'pipe', env,
|
|
44
|
+
});
|
|
45
|
+
if (existsSync(pdfPath)) {
|
|
46
|
+
const pdfBuffer = readFileSync(pdfPath);
|
|
47
|
+
const pageCount = await countPages(pdfBuffer);
|
|
48
|
+
cleanup(tmpDir);
|
|
49
|
+
return { pdfBuffer, pageCount, compilationMethod: 'tectonic', latexSource };
|
|
50
|
+
}
|
|
51
|
+
} catch (_) { /* try next */ }
|
|
52
|
+
|
|
53
|
+
// Strategy 2: pdflatex
|
|
54
|
+
try {
|
|
55
|
+
execSync(
|
|
56
|
+
`cd "${tmpDir}" && pdflatex -interaction=nonstopmode -halt-on-error resume.tex 2>&1`,
|
|
57
|
+
{ timeout: 30000, stdio: 'pipe', env }
|
|
58
|
+
);
|
|
59
|
+
if (existsSync(pdfPath)) {
|
|
60
|
+
const pdfBuffer = readFileSync(pdfPath);
|
|
61
|
+
const pageCount = await countPages(pdfBuffer);
|
|
62
|
+
cleanup(tmpDir);
|
|
63
|
+
return { pdfBuffer, pageCount, compilationMethod: 'pdflatex', latexSource };
|
|
64
|
+
}
|
|
65
|
+
} catch (_) { /* try next */ }
|
|
66
|
+
|
|
67
|
+
// Strategy 3: latexmk + xelatex
|
|
68
|
+
try {
|
|
69
|
+
execSync(
|
|
70
|
+
`cd "${tmpDir}" && latexmk -xelatex -interaction=nonstopmode -halt-on-error resume.tex 2>&1`,
|
|
71
|
+
{ timeout: 45000, stdio: 'pipe', env }
|
|
72
|
+
);
|
|
73
|
+
if (existsSync(pdfPath)) {
|
|
74
|
+
const pdfBuffer = readFileSync(pdfPath);
|
|
75
|
+
const pageCount = await countPages(pdfBuffer);
|
|
76
|
+
cleanup(tmpDir);
|
|
77
|
+
return { pdfBuffer, pageCount, compilationMethod: 'xelatex', latexSource };
|
|
78
|
+
}
|
|
79
|
+
} catch (_) { /* try next */ }
|
|
80
|
+
|
|
81
|
+
// Strategy 4: Fallback
|
|
82
|
+
cleanup(tmpDir);
|
|
83
|
+
return {
|
|
84
|
+
pdfBuffer: null,
|
|
85
|
+
pageCount: 0,
|
|
86
|
+
compilationMethod: 'none',
|
|
87
|
+
latexSource,
|
|
88
|
+
error: 'No LaTeX compiler found. Install tectonic (brew install tectonic) or texlive. Your .tex source has been saved.',
|
|
89
|
+
};
|
|
90
|
+
|
|
91
|
+
} catch (err) {
|
|
92
|
+
cleanup(tmpDir);
|
|
93
|
+
return {
|
|
94
|
+
pdfBuffer: null,
|
|
95
|
+
pageCount: 0,
|
|
96
|
+
compilationMethod: 'none',
|
|
97
|
+
latexSource,
|
|
98
|
+
error: `Compilation error: ${err.message}`,
|
|
99
|
+
};
|
|
100
|
+
}
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
/**
|
|
104
|
+
* Count pages in a PDF buffer.
|
|
105
|
+
*/
|
|
106
|
+
export async function countPages(pdfBuffer) {
|
|
107
|
+
try {
|
|
108
|
+
const pdfDoc = await PDFDocument.load(pdfBuffer);
|
|
109
|
+
return pdfDoc.getPageCount();
|
|
110
|
+
} catch (_) {
|
|
111
|
+
return 1;
|
|
112
|
+
}
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
function cleanup(dirPath) {
|
|
116
|
+
try {
|
|
117
|
+
const files = ['resume.tex', 'resume.pdf', 'resume.aux', 'resume.log',
|
|
118
|
+
'resume.out', 'resume.fls', 'resume.fdb_latexmk', 'resume.xdv',
|
|
119
|
+
'resume.synctex.gz'];
|
|
120
|
+
for (const f of files) {
|
|
121
|
+
const fp = join(dirPath, f);
|
|
122
|
+
if (existsSync(fp)) unlinkSync(fp);
|
|
123
|
+
}
|
|
124
|
+
} catch (_) { /* non-critical */ }
|
|
125
|
+
}
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Escape special LaTeX characters in text content.
|
|
3
|
+
* @param {string} text - Raw text to escape.
|
|
4
|
+
* @returns {string} LaTeX-safe text.
|
|
5
|
+
*/
|
|
6
|
+
export function escapeLatex(text) {
|
|
7
|
+
if (!text || typeof text !== 'string') return '';
|
|
8
|
+
|
|
9
|
+
return text
|
|
10
|
+
.replace(/\\/g, '\\textbackslash{}')
|
|
11
|
+
.replace(/&/g, '\\&')
|
|
12
|
+
.replace(/%/g, '\\%')
|
|
13
|
+
.replace(/\$/g, '\\$')
|
|
14
|
+
.replace(/#/g, '\\#')
|
|
15
|
+
.replace(/_/g, '\\_')
|
|
16
|
+
.replace(/\{/g, '\\{')
|
|
17
|
+
.replace(/\}/g, '\\}')
|
|
18
|
+
.replace(/~/g, '\\textasciitilde{}')
|
|
19
|
+
.replace(/\^/g, '\\textasciicircum{}')
|
|
20
|
+
.replace(/>/g, '$>$')
|
|
21
|
+
.replace(/</g, '$<$')
|
|
22
|
+
.replace(/—/g, ';')
|
|
23
|
+
.replace(/[\u2018\u2019]/g, "'")
|
|
24
|
+
.replace(/[\u201C\u201D]/g, '"');
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
/**
|
|
28
|
+
* Validate that a LaTeX source string is structurally valid.
|
|
29
|
+
* @param {string} latexSource
|
|
30
|
+
* @returns {{ valid: boolean, errors: string[] }}
|
|
31
|
+
*/
|
|
32
|
+
export function validateLatex(latexSource) {
|
|
33
|
+
const errors = [];
|
|
34
|
+
|
|
35
|
+
if (!latexSource || typeof latexSource !== 'string') {
|
|
36
|
+
return { valid: false, errors: ['LaTeX source is empty or not a string'] };
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
const trimmed = latexSource.trim();
|
|
40
|
+
|
|
41
|
+
if (!trimmed.startsWith('\\documentclass')) {
|
|
42
|
+
errors.push('Missing \\documentclass at start');
|
|
43
|
+
}
|
|
44
|
+
if (!trimmed.endsWith('\\end{document}')) {
|
|
45
|
+
errors.push('Missing \\end{document} at end');
|
|
46
|
+
}
|
|
47
|
+
if (!trimmed.includes('\\begin{document}')) {
|
|
48
|
+
errors.push('Missing \\begin{document}');
|
|
49
|
+
}
|
|
50
|
+
if (!trimmed.includes('\\pdfgentounicode=1')) {
|
|
51
|
+
errors.push('Missing \\pdfgentounicode=1');
|
|
52
|
+
}
|
|
53
|
+
if (/—/.test(trimmed)) {
|
|
54
|
+
errors.push('Contains em dashes which break LaTeX compilation');
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
return { valid: errors.length === 0, errors };
|
|
58
|
+
}
|
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
import { buildTrimPrompt } from '../prompts/trim.js';
|
|
2
|
+
import { validateLatex } from './latex-escape.js';
|
|
3
|
+
|
|
4
|
+
const MAX_ITERATIONS = 6;
|
|
5
|
+
|
|
6
|
+
/**
|
|
7
|
+
* Agentic 1-page enforcement loop.
|
|
8
|
+
* Compiles LaTeX, checks page count, iteratively trims until it fits on 1 page.
|
|
9
|
+
*
|
|
10
|
+
* @param {string} latexCode - Initial LaTeX source.
|
|
11
|
+
* @param {string} targetSector - Target industry sector.
|
|
12
|
+
* @param {Function} callAI - The AI router function.
|
|
13
|
+
* @param {Function} compileFn - LaTeX compilation function.
|
|
14
|
+
* @param {Function} [onProgress] - Optional progress callback.
|
|
15
|
+
* @returns {Promise<{ latexCode: string, pdfBuffer: Buffer|null, pageCount: number, iterations: number, warning?: string }>}
|
|
16
|
+
*/
|
|
17
|
+
export async function enforceOnePage(latexCode, targetSector, callAI, compileFn, onProgress = null) {
|
|
18
|
+
let currentLatex = latexCode;
|
|
19
|
+
let lastResult = null;
|
|
20
|
+
|
|
21
|
+
for (let iteration = 0; iteration <= MAX_ITERATIONS; iteration++) {
|
|
22
|
+
if (onProgress) {
|
|
23
|
+
onProgress({ iteration, pageCount: null, stage: 'compiling' });
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
const result = await compileFn(currentLatex);
|
|
27
|
+
lastResult = result;
|
|
28
|
+
|
|
29
|
+
if (!result.pdfBuffer) {
|
|
30
|
+
return {
|
|
31
|
+
latexCode: currentLatex,
|
|
32
|
+
pdfBuffer: null,
|
|
33
|
+
pageCount: 0,
|
|
34
|
+
iterations: iteration,
|
|
35
|
+
warning: `Compilation failed: ${result.error}`,
|
|
36
|
+
};
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
if (onProgress) {
|
|
40
|
+
onProgress({ iteration, pageCount: result.pageCount, stage: 'checking' });
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
if (result.pageCount === 1) {
|
|
44
|
+
return {
|
|
45
|
+
latexCode: currentLatex,
|
|
46
|
+
pdfBuffer: result.pdfBuffer,
|
|
47
|
+
pageCount: 1,
|
|
48
|
+
iterations: iteration,
|
|
49
|
+
};
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
if (iteration >= MAX_ITERATIONS) {
|
|
53
|
+
return {
|
|
54
|
+
latexCode: currentLatex,
|
|
55
|
+
pdfBuffer: result.pdfBuffer,
|
|
56
|
+
pageCount: result.pageCount,
|
|
57
|
+
iterations: iteration,
|
|
58
|
+
warning: `Could not fit to 1 page after ${MAX_ITERATIONS} attempts. Current: ${result.pageCount} pages`,
|
|
59
|
+
};
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
if (onProgress) {
|
|
63
|
+
onProgress({ iteration: iteration + 1, pageCount: result.pageCount, stage: 'trimming' });
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
try {
|
|
67
|
+
let trimmedLatex = await callAI(
|
|
68
|
+
buildTrimPrompt(targetSector, result.pageCount, iteration + 1),
|
|
69
|
+
`Current LaTeX:\n${currentLatex}`,
|
|
70
|
+
{ responseFormat: 'text', taskName: `trim-iteration-${iteration + 1}` }
|
|
71
|
+
);
|
|
72
|
+
|
|
73
|
+
let cleaned = trimmedLatex.trim();
|
|
74
|
+
if (cleaned.startsWith('```')) {
|
|
75
|
+
cleaned = cleaned
|
|
76
|
+
.replace(/^```(?:latex|tex)?\n?/m, '')
|
|
77
|
+
.replace(/\n?```$/m, '')
|
|
78
|
+
.trim();
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
const validation = validateLatex(cleaned);
|
|
82
|
+
if (!validation.valid) {
|
|
83
|
+
continue;
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
currentLatex = cleaned;
|
|
87
|
+
} catch (_) {
|
|
88
|
+
// Continue with the current version
|
|
89
|
+
}
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
return {
|
|
93
|
+
latexCode: currentLatex,
|
|
94
|
+
pdfBuffer: lastResult?.pdfBuffer || null,
|
|
95
|
+
pageCount: lastResult?.pageCount || 0,
|
|
96
|
+
iterations: MAX_ITERATIONS,
|
|
97
|
+
warning: 'Exhausted all trim iterations',
|
|
98
|
+
};
|
|
99
|
+
}
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
import pdfParse from 'pdf-parse';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Extract plain text from a PDF buffer.
|
|
5
|
+
* @param {Buffer} pdfBuffer - Raw PDF file bytes.
|
|
6
|
+
* @returns {Promise<{ text: string, pages: number, info: object }>}
|
|
7
|
+
*/
|
|
8
|
+
export async function extractTextFromPDF(pdfBuffer) {
|
|
9
|
+
if (!pdfBuffer || pdfBuffer.length === 0) {
|
|
10
|
+
throw new PDFExtractionError('Empty PDF buffer provided');
|
|
11
|
+
}
|
|
12
|
+
|
|
13
|
+
try {
|
|
14
|
+
const data = await pdfParse(pdfBuffer, { max: 10 });
|
|
15
|
+
const text = data.text?.trim() || '';
|
|
16
|
+
|
|
17
|
+
if (text.length < 50) {
|
|
18
|
+
throw new PDFExtractionError(
|
|
19
|
+
'PDF appears to contain very little text. It may be image-based or scanned. ' +
|
|
20
|
+
'Please use a text-based PDF for best results.'
|
|
21
|
+
);
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
return {
|
|
25
|
+
text,
|
|
26
|
+
pages: data.numpages || 1,
|
|
27
|
+
info: {
|
|
28
|
+
title: data.info?.Title || null,
|
|
29
|
+
author: data.info?.Author || null,
|
|
30
|
+
creator: data.info?.Creator || null,
|
|
31
|
+
},
|
|
32
|
+
};
|
|
33
|
+
} catch (err) {
|
|
34
|
+
if (err instanceof PDFExtractionError) throw err;
|
|
35
|
+
|
|
36
|
+
if (err.message?.includes('encrypted') || err.message?.includes('password')) {
|
|
37
|
+
throw new PDFExtractionError('This PDF is password-protected. Please use an unprotected version.');
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
throw new PDFExtractionError(`Failed to parse PDF: ${err.message}`);
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
export class PDFExtractionError extends Error {
|
|
45
|
+
constructor(msg) {
|
|
46
|
+
super(msg);
|
|
47
|
+
this.name = 'PDFExtractionError';
|
|
48
|
+
}
|
|
49
|
+
}
|
|
@@ -0,0 +1,197 @@
|
|
|
1
|
+
import EventEmitter from 'events';
|
|
2
|
+
import { callAI, parseJSONSafely } from '../lib/ai-router.js';
|
|
3
|
+
import { extractTextFromPDF } from '../lib/pdf-extractor.js';
|
|
4
|
+
import { buildAnalyzePrompt } from '../prompts/analyze.js';
|
|
5
|
+
import { buildSynthesizePrompt } from '../prompts/synthesize.js';
|
|
6
|
+
import { buildLatexPrompt } from '../prompts/latex.js';
|
|
7
|
+
import { buildTailorPrompt } from '../prompts/tailor.js';
|
|
8
|
+
import { enforceOnePage } from '../lib/page-loop.js';
|
|
9
|
+
import { compileLaTeX } from '../lib/latex-compiler.js';
|
|
10
|
+
import { atsScorer } from '../lib/ats-scorer.js';
|
|
11
|
+
import { writeFileSync } from 'fs';
|
|
12
|
+
import { dirname, join } from 'path';
|
|
13
|
+
|
|
14
|
+
/**
|
|
15
|
+
* ResumePipeline: orchestrates the full resume build.
|
|
16
|
+
* Refactored for CLI: no Supabase, no SSE, writes to local disk.
|
|
17
|
+
*/
|
|
18
|
+
export class ResumePipeline extends EventEmitter {
|
|
19
|
+
|
|
20
|
+
constructor() {
|
|
21
|
+
super();
|
|
22
|
+
this.state = 'idle';
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
setState(state, message, progress) {
|
|
26
|
+
this.state = state;
|
|
27
|
+
this.emit('progress', { state, message, progress });
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
/**
|
|
31
|
+
* Run the full resume build pipeline.
|
|
32
|
+
*
|
|
33
|
+
* @param {object} params
|
|
34
|
+
* @param {Buffer} params.pdfBuffer - Raw PDF file bytes.
|
|
35
|
+
* @param {string} params.targetSector - Target industry sector.
|
|
36
|
+
* @param {Array|null} [params.enrichmentAnswers] - Answers from enrichment interview.
|
|
37
|
+
* @param {string|null} [params.resumeText] - Pre-extracted resume text.
|
|
38
|
+
* @param {object|null} [params.analysis] - Pre-computed analysis.
|
|
39
|
+
* @param {string|null} [params.jobDescription] - Job description text for tailoring.
|
|
40
|
+
* @param {string|null} [params.outputPath] - Where to write the PDF.
|
|
41
|
+
* @returns {Promise<object>}
|
|
42
|
+
*/
|
|
43
|
+
async run({
|
|
44
|
+
pdfBuffer,
|
|
45
|
+
targetSector,
|
|
46
|
+
enrichmentAnswers = null,
|
|
47
|
+
resumeText = null,
|
|
48
|
+
analysis = null,
|
|
49
|
+
jobDescription = null,
|
|
50
|
+
outputPath = null,
|
|
51
|
+
}) {
|
|
52
|
+
try {
|
|
53
|
+
// STAGE 1: Parse PDF
|
|
54
|
+
let text = resumeText;
|
|
55
|
+
if (!text) {
|
|
56
|
+
this.setState('parsing', 'Extracting resume content...', 5);
|
|
57
|
+
const extracted = await extractTextFromPDF(pdfBuffer);
|
|
58
|
+
text = extracted.text;
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
// STAGE 2: Analyze
|
|
62
|
+
let analysisResult = analysis;
|
|
63
|
+
if (!analysisResult) {
|
|
64
|
+
this.setState('analyzing', 'Analyzing your profile...', 15);
|
|
65
|
+
const raw = await callAI(
|
|
66
|
+
buildAnalyzePrompt(targetSector),
|
|
67
|
+
`Resume text:\n${text}`,
|
|
68
|
+
{ responseFormat: 'json', taskName: 'analyze' }
|
|
69
|
+
);
|
|
70
|
+
analysisResult = parseJSONSafely(raw);
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
// STAGE 3: Enrichment Gate
|
|
74
|
+
if (!enrichmentAnswers) {
|
|
75
|
+
this.setState('enriching', 'Waiting for your answers...', 25);
|
|
76
|
+
return {
|
|
77
|
+
requiresEnrichment: true,
|
|
78
|
+
questions: analysisResult.follow_up_questions,
|
|
79
|
+
analysis: analysisResult,
|
|
80
|
+
resumeText: text,
|
|
81
|
+
};
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
// STAGE 4: Synthesize Content
|
|
85
|
+
this.setState('synthesizing', 'Rewriting with CalibrCV rules...', 45);
|
|
86
|
+
const enrichmentContext = enrichmentAnswers
|
|
87
|
+
.map(a => `Q: ${a.question}\nA: ${a.answer}`)
|
|
88
|
+
.join('\n\n');
|
|
89
|
+
|
|
90
|
+
const synthesisRaw = await callAI(
|
|
91
|
+
buildSynthesizePrompt(targetSector),
|
|
92
|
+
`Original resume:\n${text}\n\nEnrichment answers:\n${enrichmentContext}`,
|
|
93
|
+
{ responseFormat: 'json', taskName: 'synthesize' }
|
|
94
|
+
);
|
|
95
|
+
let resumeJSON = parseJSONSafely(synthesisRaw);
|
|
96
|
+
|
|
97
|
+
// STAGE 4b: Tailor (optional)
|
|
98
|
+
if (jobDescription) {
|
|
99
|
+
this.setState('tailoring', 'Tailoring for the target role...', 50);
|
|
100
|
+
const tailorRaw = await callAI(
|
|
101
|
+
buildTailorPrompt(
|
|
102
|
+
resumeJSON.experience?.[0]?.title || 'Target Role',
|
|
103
|
+
'Target Company'
|
|
104
|
+
),
|
|
105
|
+
`Master resume JSON:\n${JSON.stringify(resumeJSON, null, 2)}\n\nJob description:\n${jobDescription}`,
|
|
106
|
+
{ responseFormat: 'json', taskName: 'tailor' }
|
|
107
|
+
);
|
|
108
|
+
resumeJSON = parseJSONSafely(tailorRaw);
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
// STAGE 5: Generate LaTeX
|
|
112
|
+
this.setState('generating_latex', 'Engineering your layout...', 60);
|
|
113
|
+
let latexRaw = await callAI(
|
|
114
|
+
buildLatexPrompt(),
|
|
115
|
+
`Resume JSON:\n${JSON.stringify(resumeJSON, null, 2)}`,
|
|
116
|
+
{ responseFormat: 'text', taskName: 'generate-latex' }
|
|
117
|
+
);
|
|
118
|
+
|
|
119
|
+
if (latexRaw.trim().startsWith('```')) {
|
|
120
|
+
latexRaw = latexRaw
|
|
121
|
+
.replace(/^```(?:latex|tex)?\n?/m, '')
|
|
122
|
+
.replace(/\n?```$/m, '')
|
|
123
|
+
.trim();
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
if (!latexRaw.trim().startsWith('\\documentclass')) {
|
|
127
|
+
latexRaw = await callAI(
|
|
128
|
+
buildLatexPrompt(),
|
|
129
|
+
`IMPORTANT: Return ONLY raw LaTeX starting with \\documentclass.\n\nResume JSON:\n${JSON.stringify(resumeJSON, null, 2)}`,
|
|
130
|
+
{ responseFormat: 'text', taskName: 'generate-latex-retry' }
|
|
131
|
+
);
|
|
132
|
+
if (latexRaw.trim().startsWith('```')) {
|
|
133
|
+
latexRaw = latexRaw
|
|
134
|
+
.replace(/^```(?:latex|tex)?\n?/m, '')
|
|
135
|
+
.replace(/\n?```$/m, '')
|
|
136
|
+
.trim();
|
|
137
|
+
}
|
|
138
|
+
if (!latexRaw.trim().startsWith('\\documentclass')) {
|
|
139
|
+
throw new Error('AI returned invalid LaTeX after 2 attempts');
|
|
140
|
+
}
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
// STAGE 6+7: Compile + 1-page Enforcement
|
|
144
|
+
this.setState('compiling', 'Compiling to PDF...', 70);
|
|
145
|
+
const { latexCode, pdfBuffer: compiledPDF, pageCount, iterations, warning } =
|
|
146
|
+
await enforceOnePage(
|
|
147
|
+
latexRaw,
|
|
148
|
+
targetSector,
|
|
149
|
+
callAI,
|
|
150
|
+
compileLaTeX,
|
|
151
|
+
(progress) => {
|
|
152
|
+
if (progress.stage === 'trimming') {
|
|
153
|
+
this.setState('trimming',
|
|
154
|
+
`Fitting to one page (attempt ${progress.iteration}/6)...`,
|
|
155
|
+
70 + (progress.iteration * 3));
|
|
156
|
+
} else if (progress.stage === 'checking') {
|
|
157
|
+
this.setState('checking_pages', 'Verifying page length...', 75);
|
|
158
|
+
}
|
|
159
|
+
}
|
|
160
|
+
);
|
|
161
|
+
|
|
162
|
+
// STAGE 8: ATS Scoring
|
|
163
|
+
this.setState('scoring', 'Calculating ATS compatibility...', 90);
|
|
164
|
+
const atsReport = atsScorer.score(text, resumeJSON, jobDescription);
|
|
165
|
+
|
|
166
|
+
// STAGE 9: Write to disk
|
|
167
|
+
if (outputPath && compiledPDF) {
|
|
168
|
+
writeFileSync(outputPath, compiledPDF);
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
// Always save .tex source alongside
|
|
172
|
+
if (outputPath) {
|
|
173
|
+
const texPath = outputPath.replace(/\.pdf$/i, '.tex');
|
|
174
|
+
writeFileSync(texPath, latexCode, 'utf-8');
|
|
175
|
+
}
|
|
176
|
+
|
|
177
|
+
this.setState('complete', 'Your resume is ready.', 100);
|
|
178
|
+
|
|
179
|
+
return {
|
|
180
|
+
success: true,
|
|
181
|
+
atsScore: atsReport.total,
|
|
182
|
+
atsBreakdown: atsReport,
|
|
183
|
+
resumeJSON,
|
|
184
|
+
latexCode,
|
|
185
|
+
pdfBuffer: compiledPDF,
|
|
186
|
+
pageCount,
|
|
187
|
+
trimIterations: iterations,
|
|
188
|
+
warning: warning || null,
|
|
189
|
+
outputPath,
|
|
190
|
+
};
|
|
191
|
+
|
|
192
|
+
} catch (error) {
|
|
193
|
+
this.setState('error', `Something went wrong: ${error.message}`, 0);
|
|
194
|
+
throw error;
|
|
195
|
+
}
|
|
196
|
+
}
|
|
197
|
+
}
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
export function buildAnalyzePrompt(targetSector) {
|
|
2
|
+
return `You are the CalibrCV Resume Analysis Agent — a world-class talent specialist with 15 years at elite recruiting firms who has reviewed 50,000+ resumes. Analyze the submitted resume with extreme precision and return a structured JSON diagnostic report.
|
|
3
|
+
|
|
4
|
+
Evaluate against 6 CalibrCV quality standards:
|
|
5
|
+
1. BULLET QUALITY: Every bullet must have a strong HBS action verb + quantified outcome + be under 100 characters
|
|
6
|
+
2. SUMMARY: 3-4 sentences, zero personal pronouns (I/my/me/we/our), implicit third-person executive voice
|
|
7
|
+
3. ATS COMPLIANCE: Standard section headings, clean formatting, high keyword density
|
|
8
|
+
4. IMPACT EVIDENCE: Every role must have at least one number or concrete metric
|
|
9
|
+
5. COMPLETENESS: Contact info, dates, and locations present on all roles
|
|
10
|
+
6. RELEVANCE: Skills and bullets aligned to target sector: ${targetSector}
|
|
11
|
+
|
|
12
|
+
Return ONLY valid JSON. No preamble. No markdown backticks. No explanation outside the JSON.
|
|
13
|
+
|
|
14
|
+
EXACT JSON SCHEMA:
|
|
15
|
+
{
|
|
16
|
+
"overall_score": <integer 0-100>,
|
|
17
|
+
"target_sector": "${targetSector}",
|
|
18
|
+
"sections": {
|
|
19
|
+
"summary": { "score": <0-10>, "verdict": "pass|fail|missing", "issue": "<string or null>" },
|
|
20
|
+
"experience": { "score": <0-10>, "verdict": "pass|fail|partial", "issues": ["<string>"] },
|
|
21
|
+
"education": { "score": <0-10>, "verdict": "pass|fail|pass", "issue": "<string or null>" },
|
|
22
|
+
"skills": { "score": <0-10>, "verdict": "pass|fail|pass", "issue": "<string or null>" },
|
|
23
|
+
"projects": { "score": <0-10>, "verdict": "pass|fail|missing|partial", "issues": ["<string>"] }
|
|
24
|
+
},
|
|
25
|
+
"critical_gaps": ["<specific missing element>"],
|
|
26
|
+
"strong_points": ["<specific strength>"],
|
|
27
|
+
"follow_up_questions": [
|
|
28
|
+
{
|
|
29
|
+
"id": "q1",
|
|
30
|
+
"context": "<which section this improves>",
|
|
31
|
+
"question": "<exact natural-language coaching question>",
|
|
32
|
+
"why_important": "<one sentence: what metric or improvement this unlocks>",
|
|
33
|
+
"example_answer": "<example of a strong, specific answer>"
|
|
34
|
+
}
|
|
35
|
+
],
|
|
36
|
+
"max_questions_needed": <integer 3-7>
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
RULES FOR FOLLOW-UP QUESTIONS:
|
|
40
|
+
- Generate 3 minimum, 7 maximum questions
|
|
41
|
+
- Every question must extract something SPECIFIC: a number, a tool name, a timeline, a business outcome
|
|
42
|
+
- Never ask vague questions like "Tell me more about X"
|
|
43
|
+
- Always ask for a metric, a name, or a concrete detail that will become a bullet point
|
|
44
|
+
- Friendly, coaching tone — not interrogative
|
|
45
|
+
- Prioritize the weakest-scoring sections first
|
|
46
|
+
- Examples of GOOD questions: "What was the total revenue or transaction value of the deals you worked on?" / "Can you name 3 specific Python libraries you used most heavily in this role?" / "What percentage improvement did your automation create, and over what time period?"
|
|
47
|
+
- Examples of BAD questions: "What were your main responsibilities?" / "Can you describe your experience?"`;
|
|
48
|
+
}
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
import { LATEX_TEMPLATE } from '../constants/latex-template.js';
|
|
2
|
+
|
|
3
|
+
export function buildLatexPrompt() {
|
|
4
|
+
return `You are the CalibrCV LaTeX Generation Agent. Convert structured JSON resume data into a complete, valid, immediately compilable LaTeX document.
|
|
5
|
+
|
|
6
|
+
Use ONLY the CalibrCV LaTeX template below. Deviate in no way. Do not add packages, change margins, alter fonts, or modify document structure. Your output must compile without errors in a standard pdflatex environment.
|
|
7
|
+
|
|
8
|
+
${LATEX_TEMPLATE}
|
|
9
|
+
|
|
10
|
+
GENERATION RULES:
|
|
11
|
+
|
|
12
|
+
STRUCTURE:
|
|
13
|
+
1. \\resumeSubheading for Experience: arg1=Job Title, arg2=Date Range, arg3=Company, arg4=Location
|
|
14
|
+
2. \\resumeSubheading for Education: arg1=Institution, arg2=Location, arg3=Degree, arg4=Date Range
|
|
15
|
+
NOTE: Experience and Education swap args 1-2 and 3-4.
|
|
16
|
+
3. \\resumeProjectHeading: arg1=\\textbf{Name} $|$ \\emph{Label}, arg2=Date
|
|
17
|
+
|
|
18
|
+
CHARACTER ESCAPING:
|
|
19
|
+
& -> \\& | % -> \\% | $ -> \\$ | # -> \\# | _ -> \\_ | { -> \\{ | } -> \\}
|
|
20
|
+
~ -> \\textasciitilde{} | ^ -> \\textasciicircum{} | > -> $>$ | < -> $<$
|
|
21
|
+
Em dashes must not exist. If found, replace with a semicolon.
|
|
22
|
+
|
|
23
|
+
SUMMARY PLACEMENT:
|
|
24
|
+
- The professional summary renders in \\begin{center}\\small\\textit{...}\\end{center}
|
|
25
|
+
- Place this block AFTER the heading section, BEFORE \\section{Education}
|
|
26
|
+
- Do NOT create \\section{Summary}
|
|
27
|
+
|
|
28
|
+
LINKEDIN: \\faLinkedin \\hspace{1pt} \\href{https://linkedin.com/in/SLUG}{\\underline{SLUG}}
|
|
29
|
+
|
|
30
|
+
SKILLS FORMAT:
|
|
31
|
+
\\textbf{Quantitative Stack}{: <csv tools>} \\\\
|
|
32
|
+
\\textbf{Analytic Domain}{: <csv methods>}
|
|
33
|
+
|
|
34
|
+
VALIDATION:
|
|
35
|
+
Before returning, verify:
|
|
36
|
+
- Output starts with \\documentclass
|
|
37
|
+
- Output ends with \\end{document}
|
|
38
|
+
- \\pdfgentounicode=1 is present in preamble
|
|
39
|
+
- No em dashes anywhere
|
|
40
|
+
- All & characters in text are escaped as \\&
|
|
41
|
+
- The LinkedIn icon uses \\faLinkedin (from fontawesome5 package already in template)
|
|
42
|
+
|
|
43
|
+
Return ONLY raw LaTeX code. No markdown backticks. No explanation. The output must start with \\documentclass and end with \\end{document}.`;
|
|
44
|
+
}
|