@pranavraut033/ats-checker 0.1.0 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +358 -1
- package/dist/index.d.ts +358 -1
- package/dist/index.js +631 -2
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +619 -3
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.d.mts
CHANGED
@@ -103,6 +103,112 @@ interface RuleContext {
     overusedKeywords?: string[];
 }
 
+/**
+ * LLM v2 Support Types - Optional, Backward Compatible
+ */
+
+/**
+ * JSON Schema for response validation
+ */
+interface JSONSchema {
+    type: string;
+    properties?: Record<string, unknown>;
+    required?: string[];
+    items?: unknown;
+    [key: string]: unknown;
+}
+/**
+ * LLM Client abstraction - user provides their own implementation
+ * This allows flexibility with different LLM providers without direct dependencies
+ */
+interface LLMClient {
+    /**
+     * Create a structured completion from the LLM
+     * Must validate and return only valid JSON matching the schema
+     */
+    createCompletion(input: {
+        model: string;
+        messages: {
+            role: "system" | "user";
+            content: string;
+        }[];
+        max_tokens: number;
+        response_format: JSONSchema;
+    }): Promise<{
+        content: unknown;
+        usage?: {
+            prompt_tokens?: number;
+            completion_tokens?: number;
+            total_tokens?: number;
+        };
+    }>;
+}
+/**
+ * LLM budget configuration - prevents runaway spending
+ */
+interface LLMBudget {
+    maxCalls: number;
+    maxTokensPerCall: number;
+    maxTotalTokens: number;
+}
+/**
+ * Feature toggles for LLM capabilities
+ */
+interface LLMFeatures {
+    skillNormalization?: boolean;
+    sectionClassification?: boolean;
+    suggestions?: boolean;
+}
+/**
+ * Complete LLM configuration
+ */
+interface LLMConfig {
+    /** User-provided LLM client (e.g., OpenAI wrapper) */
+    client: LLMClient;
+    /** Model identifiers */
+    models?: {
+        /** Default model for fast, structured output (e.g., "gpt-4o-mini") */
+        default: string;
+        /** Optional thinking model for complex reasoning (e.g., "o4-mini") */
+        thinking?: string;
+    };
+    /** Budget constraints */
+    limits: LLMBudget;
+    /** Which LLM features to enable */
+    enable?: LLMFeatures;
+    /** Request timeout in milliseconds */
+    timeoutMs?: number;
+}
+/**
+ * Updated AnalyzeResumeInput with optional LLM support
+ */
+interface AnalyzeResumeInputV2 {
+    resumeText: string;
+    jobDescription: string;
+    config?: ATSConfig;
+    llm?: LLMConfig;
+}
+/**
+ * LLM usage tracking for debugging
+ */
+interface LLMUsageStats {
+    totalCalls: number;
+    totalTokensUsed: number;
+    callsRemaining: number;
+    tokensRemaining: number;
+    features: Partial<Record<keyof LLMFeatures, boolean>>;
+}
+/**
+ * Result of an LLM operation (with fallback info)
+ */
+interface LLMResult<T> {
+    success: boolean;
+    data?: T;
+    fallback: boolean;
+    error?: string;
+    tokensUsed?: number;
+}
+
 interface ATSBreakdown {
     skills: number;
     experience: number;
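The new `LLMClient` interface is the only integration point the caller must supply; the package ships no provider bindings. Below is a minimal sketch of a conforming client, assuming the OpenAI Chat Completions HTTP endpoint with JSON mode — the endpoint, environment variable, and JSON-mode flag are assumptions, and any provider that returns parseable JSON can be wrapped the same way.

```ts
// Hypothetical wrapper; not part of @pranavraut033/ats-checker.
import type { LLMClient } from "@pranavraut033/ats-checker";

export const openAiClient: LLMClient = {
  async createCompletion(input) {
    const res = await fetch("https://api.openai.com/v1/chat/completions", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
      },
      body: JSON.stringify({
        model: input.model,
        messages: input.messages,
        max_tokens: input.max_tokens,
        // JSON mode keeps the reply parseable; input.response_format describes
        // the expected shape, which a production client should validate before
        // returning, per the LLMClient contract above.
        response_format: { type: "json_object" },
      }),
    });
    if (!res.ok) throw new Error(`LLM request failed: ${res.status}`);
    const json = await res.json();
    return {
      content: JSON.parse(json.choices[0].message.content),
      usage: json.usage, // { prompt_tokens, completion_tokens, total_tokens }
    };
  },
};
```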
@@ -113,6 +219,7 @@ interface AnalyzeResumeInput {
     resumeText: string;
     jobDescription: string;
     config?: ATSConfig;
+    llm?: LLMConfig;
 }
 interface ATSAnalysisResult {
     score: number;
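Because `llm` is an optional field on `AnalyzeResumeInput`, existing v1 calls compile unchanged and opting in is just a matter of passing a config. A hedged sketch using only fields declared in `LLMConfig` above — the model id and budget numbers mirror the package's own JSDoc example, and `./llm-client` is the hypothetical wrapper sketched earlier:

```ts
import { analyzeResume, type LLMConfig } from "@pranavraut033/ats-checker";
import { openAiClient } from "./llm-client"; // hypothetical path to the LLMClient sketch above

const llm: LLMConfig = {
  client: openAiClient,
  models: { default: "gpt-4o-mini" },      // optional; model ids are the caller's choice
  limits: { maxCalls: 3, maxTokensPerCall: 2000, maxTotalTokens: 5000 },
  enable: { suggestions: true },
  timeoutMs: 10_000,                        // arbitrary example value
};

// v1 behaviour is unchanged when `llm` is omitted.
const baseline = analyzeResume({ resumeText: "...", jobDescription: "..." });
// v2 opt-in: same call shape, one extra field.
const enhanced = analyzeResume({ resumeText: "...", jobDescription: "...", llm });
```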
@@ -127,9 +234,259 @@ interface ATSAnalysisResult {
 declare const defaultSkillAliases: SkillAliases;
 declare const defaultProfiles: ATSProfile[];
 
+/**
+ * LLM Manager - Orchestrates all LLM operations with budget constraints
+ * Provides safe fallback to v1 deterministic logic on any failure
+ */
+
+interface LLMCallOptions {
+    model?: string;
+    useThinking?: boolean;
+    requestedTokens?: number;
+}
+/**
+ * Core LLM manager - handles all interactions with the LLM client
+ */
+declare class LLMManager {
+    private client;
+    private budgetManager;
+    private config;
+    private timeoutMs;
+    private warnings;
+    constructor(config: LLMConfig);
+    /**
+     * Structured call to LLM with timeout and budget protection
+     */
+    callLLM<T>(systemPrompt: string, userPrompt: string, schema: JSONSchema, options?: LLMCallOptions): Promise<LLMResult<T>>;
+    /**
+     * Get list of warnings from LLM operations
+     */
+    getWarnings(): string[];
+    /**
+     * Get budget stats
+     */
+    getBudgetStats(): {
+        callsUsed: number;
+        callsRemaining: number;
+        tokensUsed: number;
+        tokensRemaining: number;
+        totalCalls: number;
+        totalTokens: number;
+    };
+    /**
+     * Check if features are enabled
+     */
+    isFeatureEnabled(feature: keyof NonNullable<LLMConfig["enable"]>): boolean;
+    /**
+     * Create a timeout promise
+     */
+    private createTimeout;
+    /**
+     * Estimate tokens for a call (rough approximation)
+     * 1 token ≈ 4 characters average
+     */
+    private estimateTokens;
+    /**
+     * Validate that schema looks like valid JSON schema
+     */
+    private isValidJsonSchema;
+    /**
+     * Simple schema validation - check required fields exist
+     */
+    private validateAgainstSchema;
+}
+
+/**
+ * LLM Budget Manager
+ * Enforces call and token limits to prevent runaway costs
+ */
+
+declare class LLMBudgetManager {
+    private callCount;
+    private totalTokensUsed;
+    private readonly limits;
+    constructor(limits: LLMBudget);
+    /**
+     * Check if we can make a call with the given token estimate
+     * Throws if budget would be exceeded
+     */
+    assertCanCall(requestedTokens: number): void;
+    /**
+     * Record actual token usage from a completed call
+     */
+    recordUsage(tokensUsed: number): void;
+    /**
+     * Get current budget state
+     */
+    getStats(): {
+        callsUsed: number;
+        callsRemaining: number;
+        tokensUsed: number;
+        tokensRemaining: number;
+        totalCalls: number;
+        totalTokens: number;
+    };
+    /**
+     * Check if budget is exhausted
+     */
+    isExhausted(): boolean;
+    /**
+     * Reset budget (for testing)
+     */
+    reset(): void;
+}
+
+/**
+ * JSON Schemas for LLM responses
+ * These enforce structured output from the LLM
+ */
+
+/**
+ * Type-safe schema accessor
+ */
+declare const LLMSchemas: {
+    readonly skillNormalization: JSONSchema;
+    readonly sectionClassification: JSONSchema;
+    readonly suggestionEnhancement: JSONSchema;
+    readonly jdClarification: JSONSchema;
+    readonly validation: JSONSchema;
+};
+
+/**
+ * LLM Prompt Templates
+ * Strict prompts that enforce JSON output and role clarity
+ */
+/**
+ * System prompts for different LLM tasks
+ */
+declare const LLMPrompts: {
+    /**
+     * System prompt for skill normalization
+     */
+    skillNormalizationSystem: string;
+    /**
+     * User prompt for skill normalization
+     */
+    skillNormalizationUser: (skills: string[]) => string;
+    /**
+     * System prompt for section classification
+     */
+    sectionClassificationSystem: string;
+    /**
+     * User prompt for section classification
+     */
+    sectionClassificationUser: (headers: string[]) => string;
+    /**
+     * System prompt for suggestion enhancement
+     */
+    suggestionEnhancementSystem: string;
+    /**
+     * User prompt for suggestion enhancement
+     */
+    suggestionEnhancementUser: (suggestions: string[]) => string;
+    /**
+     * System prompt for JD clarification
+     */
+    jdClarificationSystem: string;
+    /**
+     * User prompt for JD clarification
+     */
+    jdClarificationUser: (jd: string) => string;
+};
+/**
+ * Create a structured prompt for a task
+ */
+declare function createPrompt(systemBase: string, userBuilder: (input: string) => string, input: string): {
+    system: string;
+    user: string;
+};
+
+/**
+ * LLM Adapters - Transform LLM responses into usable data
+ * Safely extract and validate results from LLM calls
+ */
+/**
+ * Adapter for skill normalization response
+ */
+declare function adaptSkillNormalizationResponse(data: unknown): {
+    input: string;
+    normalized: string;
+    confidence?: number;
+}[];
+/**
+ * Adapter for section classification response
+ */
+declare function adaptSectionClassificationResponse(data: unknown): {
+    header: string;
+    classification: string;
+    confidence?: number;
+}[];
+/**
+ * Adapter for suggestion enhancement response
+ */
+declare function adaptSuggestionEnhancementResponse(data: unknown): {
+    original: string;
+    enhanced: string;
+    actionable?: boolean;
+}[];
+/**
+ * Adapter for JD clarification response
+ */
+declare function adaptJdClarificationResponse(data: unknown): {
+    implicitSkills: string[];
+    implicitExperience?: {
+        minYears?: number;
+        domains?: string[];
+    };
+    clarityScore?: number;
+};
+/**
+ * Safe value extraction with type coercion
+ */
+declare function safeExtractString(obj: unknown, key: string): string | undefined;
+/**
+ * Safe array extraction
+ */
+declare function safeExtractArray(obj: unknown, key: string): unknown[];
+/**
+ * Safe number extraction
+ */
+declare function safeExtractNumber(obj: unknown, key: string): number | undefined;
+
 /**
  * Analyze a resume against a job description using deterministic, explainable rules.
+ * Optional LLM config enables v2 features while maintaining full backward compatibility.
+ *
+ * @param input Resume, job description, and optional LLM config
+ * @returns ATS analysis result with score, breakdown, and suggestions
+ *
+ * @example
+ * // v1 behavior - deterministic only
+ * const result = analyzeResume({ resumeText, jobDescription });
+ *
+ * @example
+ * // v2 with LLM - enhanced suggestions
+ * const result = analyzeResume({
+ *   resumeText,
+ *   jobDescription,
+ *   llm: { client, limits: { maxCalls: 3, maxTokensPerCall: 2000, maxTotalTokens: 5000 } }
+ * });
  */
 declare function analyzeResume(input: AnalyzeResumeInput): ATSAnalysisResult;
+/**
+ * Async version: Analyze a resume with full LLM support
+ * This version properly handles async LLM calls
+ *
+ * @param input Resume, job description, and optional LLM config
+ * @returns Promise<ATSAnalysisResult>
+ *
+ * @example
+ * const result = await analyzeResumeAsync({
+ *   resumeText,
+ *   jobDescription,
+ *   llm: { client, limits: {...}, enable: { suggestions: true } }
+ * });
+ */
+declare function analyzeResumeAsync(input: AnalyzeResumeInput): Promise<ATSAnalysisResult>;
 
-export { type ATSAnalysisResult, type ATSBreakdown, type ATSConfig, type ATSProfile, type ATSRule, type ATSWeights, type AnalyzeResumeInput, type KeywordDensityConfig, type NormalizedWeights, type ParsedDateRange, type ParsedExperienceEntry, type ParsedJobDescription, type ParsedResume, type ResolvedATSConfig, type ResumeSection, type RuleContext, type SectionPenaltyConfig, type SkillAliases, analyzeResume, defaultProfiles, defaultSkillAliases };
+export { type ATSAnalysisResult, type ATSBreakdown, type ATSConfig, type ATSProfile, type ATSRule, type ATSWeights, type AnalyzeResumeInput, type AnalyzeResumeInputV2, type JSONSchema, type KeywordDensityConfig, type LLMBudget, LLMBudgetManager, type LLMClient, type LLMConfig, type LLMFeatures, LLMManager, LLMPrompts, type LLMResult, LLMSchemas, type LLMUsageStats, type NormalizedWeights, type ParsedDateRange, type ParsedExperienceEntry, type ParsedJobDescription, type ParsedResume, type ResolvedATSConfig, type ResumeSection, type RuleContext, type SectionPenaltyConfig, type SkillAliases, adaptJdClarificationResponse, adaptSectionClassificationResponse, adaptSkillNormalizationResponse, adaptSuggestionEnhancementResponse, analyzeResume, analyzeResumeAsync, createPrompt, defaultProfiles, defaultSkillAliases, safeExtractArray, safeExtractNumber, safeExtractString };
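For callers that want the lower-level building blocks now exported (LLMManager, LLMPrompts, LLMSchemas, and the adapters), here is a hedged sketch of a single skill-normalization call wired together from those exports; the skill list, budget numbers, and `./llm-client` wrapper are placeholders, not values shipped by the package.

```ts
import {
  LLMManager,
  LLMPrompts,
  LLMSchemas,
  adaptSkillNormalizationResponse,
} from "@pranavraut033/ats-checker";
import { openAiClient } from "./llm-client"; // hypothetical wrapper from the earlier sketch

const manager = new LLMManager({
  client: openAiClient,
  limits: { maxCalls: 3, maxTokensPerCall: 2000, maxTotalTokens: 5000 },
  enable: { skillNormalization: true },
});

const result = await manager.callLLM(
  LLMPrompts.skillNormalizationSystem,
  LLMPrompts.skillNormalizationUser(["reactjs", "node js", "k8s"]),
  LLMSchemas.skillNormalization,
);

// LLMResult carries a `fallback` flag, so callers can keep the deterministic
// v1 output when the call fails, times out, or exhausts the budget.
const normalized = result.success ? adaptSkillNormalizationResponse(result.data) : [];
console.log(normalized, manager.getWarnings(), manager.getBudgetStats());
```

The new `analyzeResumeAsync` entry point wraps this orchestration for the common case, so most callers never need to touch LLMManager directly.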