@with-logic/intent 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +277 -0
- package/dist/batches.d.ts +55 -0
- package/dist/batches.js +91 -0
- package/dist/batches.js.map +1 -0
- package/dist/config.d.ts +24 -0
- package/dist/config.js +29 -0
- package/dist/config.js.map +1 -0
- package/dist/extractors.d.ts +52 -0
- package/dist/extractors.js +88 -0
- package/dist/extractors.js.map +1 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.js +5 -0
- package/dist/index.js.map +1 -0
- package/dist/intent.d.ts +402 -0
- package/dist/intent.js +540 -0
- package/dist/intent.js.map +1 -0
- package/dist/lib/config.d.ts +30 -0
- package/dist/lib/config.js +81 -0
- package/dist/lib/config.js.map +1 -0
- package/dist/lib/number.d.ts +5 -0
- package/dist/lib/number.js +15 -0
- package/dist/lib/number.js.map +1 -0
- package/dist/llm_client.d.ts +14 -0
- package/dist/llm_client.js +29 -0
- package/dist/llm_client.js.map +1 -0
- package/dist/messages.d.ts +41 -0
- package/dist/messages.js +136 -0
- package/dist/messages.js.map +1 -0
- package/dist/providers/groq.d.ts +84 -0
- package/dist/providers/groq.js +335 -0
- package/dist/providers/groq.js.map +1 -0
- package/dist/schema.d.ts +82 -0
- package/dist/schema.js +114 -0
- package/dist/schema.js.map +1 -0
- package/dist/types.d.ts +74 -0
- package/dist/types.js +2 -0
- package/dist/types.js.map +1 -0
- package/package.json +78 -0
package/dist/lib/config.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"config.js","sourceRoot":"","sources":["../../src/lib/config.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG;AAMH,SAAS,mBAAmB,CAAC,IAAY;IACvC,MAAM,IAAI,KAAK,CAAC,GAAG,IAAI,eAAe,CAAC,CAAC;AAC1C,CAAC;AAED,OAAO,EAAE,KAAK,EAAE,MAAM,UAAU,CAAC;AAEjC,MAAM,UAAU,MAAM,CAAC,IAAY,EAAE,IAA0B;IAC7D,MAAM,KAAK,GAAG,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC;IAChC,MAAM,MAAM,GAAG,KAAK,IAAI,IAAI,IAAI,KAAK,CAAC,MAAM,GAAG,CAAC,CAAC;IACjD,IAAI,MAAM,EAAE,CAAC;QACX,OAAO,KAAe,CAAC;IACzB,CAAC;IACD,IAAI,IAAI,EAAE,OAAO,KAAK,SAAS,EAAE,CAAC;QAChC,OAAO,IAAI,CAAC,OAAO,CAAC;IACtB,CAAC;IACD,OAAO,mBAAmB,CAAC,IAAI,CAAC,CAAC;AACnC,CAAC;AAED;;;;;GAKG;AACH,MAAM,UAAU,WAAW,CACzB,IAAY,EACZ,IAAoB;IAEpB,MAAM,KAAK,GAAG,IAAI,CAAC,OAAO,KAAK,SAAS,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,EAAE,EAAE,OAAO,EAAE,IAAI,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC;IAClG,IAAI,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,KAAK,CAAC,EAAE,CAAC;QAChC,OAAO,KAAK,CAAC;IACf,CAAC;IACD,MAAM,IAAI,KAAK,CAAC,GAAG,IAAI,oBAAoB,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC;AACvE,CAAC;AAED,MAAM,UAAU,OAAO,CAAC,IAAY,EAAE,IAA2B;IAC/D,MAAM,KAAK,GAAG,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC;IAChC,MAAM,MAAM,GAAG,KAAK,IAAI,IAAI,IAAI,KAAK,CAAC,MAAM,GAAG,CAAC,CAAC;IACjD,IAAI,MAAM,EAAE,CAAC;QACX,MAAM,CAAC,GAAG,KAAe,CAAC;QAC1B,OAAO,CAAC,GAAG,EAAE,OAAO,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,KAAK,KAAK,CAAC;IAC9C,CAAC;IACD,IAAI,IAAI,EAAE,OAAO,KAAK,SAAS,EAAE,CAAC;QAChC,OAAO,IAAI,CAAC,OAAO,CAAC;IACtB,CAAC;IACD,OAAO,mBAAmB,CAAC,IAAI,CAAC,CAAC;AACnC,CAAC;AAED,MAAM,UAAU,GAAG,CAAC,IAAY,EAAE,IAAmB;IACnD,MAAM,KAAK,GAAG,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC;IAChC,MAAM,MAAM,GAAG,KAAK,IAAI,IAAI,IAAI,KAAK,CAAC,MAAM,GAAG,CAAC,CAAC;IACjD,IAAI,MAAM,EAAE,CAAC;QACX,MAAM,MAAM,GAAG,MAAM,CAAC,QAAQ,CAAC,KAAe,EAAE,EAAE,CAAC,CAAC;QACpD,IAAI,MAAM,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC;YACzB,MAAM,IAAI,KAAK,CAAC,GAAG,IAAI,0BAA0B,CAAC,CAAC;QACrD,CAAC;QACD,OAAO,KAAK,CAAC,MAAM,EAAE,IAAI,EAAE,GAAG,EAAE,IAAI,EAAE,GAAG,CAAC,CAAC;IAC7C,CAAC;IACD,IAAI,IAAI,EAAE,OAAO,KAAK,SAAS,EAAE,CAAC;QAChC,MAAM,GAAG,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;QACrC,OAAO,KAAK,CAAC,GAAG,EAAE,IAAI,EAAE,GAAG,EAAE,IAAI,EAAE,GAAG,CAAC,CAAC;IAC1C,CAAC;IACD,OAAO,mBAAmB,CAAC,IAAI,CAAC,CAAC;AACnC,CAAC;AAED,MAAM,UAAU,MAAM,CAAC,IAAY,EAAE,IAAmB;IACtD,MAAM,KAAK,GAAG,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC;IAChC,MAAM,MAAM,GAAG,KAAK,IAAI,IAAI,IAAI,KAAK,CAAC,MAAM,GAAG,CAAC,CAAC;IACjD,IAAI,MAAM,EAAE,CAAC;QACX,MAAM,MAAM,GAAG,MAAM,CAAC,UAAU,CAAC,KAAe,CAAC,CAAC;QAClD,IAAI,MAAM,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE,CAAC;YACzB,MAAM,IAAI,KAAK,CAAC,GAAG,IAAI,yBAAyB,CAAC,CAAC;QACpD,CAAC;QACD,OAAO,KAAK,CAAC,MAAM,EAAE,IAAI,EAAE,GAAG,EAAE,IAAI,EAAE,GAAG,CAAC,CAAC;IAC7C,CAAC;IACD,IAAI,IAAI,EAAE,OAAO,KAAK,SAAS,EAAE,CAAC;QAChC,OAAO,KAAK,CAAC,IAAI,CAAC,OAAO,EAAE,IAAI,EAAE,GAAG,EAAE,IAAI,EAAE,GAAG,CAAC,CAAC;IACnD,CAAC;IACD,OAAO,mBAAmB,CAAC,IAAI,CAAC,CAAC;AACnC,CAAC"}
package/dist/lib/number.js
ADDED
@@ -0,0 +1,15 @@
/**
 * Clamp a number to the provided [min, max] range. If min or max are
 * undefined, only the defined bound(s) are applied.
 */
export function clamp(n, min, max) {
    let out = n;
    if (typeof min === "number" && out < min) {
        out = min;
    }
    if (typeof max === "number" && out > max) {
        out = max;
    }
    return out;
}
//# sourceMappingURL=number.js.map
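For orientation, a small usage sketch of the compiled clamp helper added above. The values and the import path are illustrative only; dist/lib/number may not be a public entry point of the package.

```typescript
// Assumes the helper above is importable; adjust the path to however the
// package actually exposes dist/lib/number.
import { clamp } from "@with-logic/intent/dist/lib/number.js";

clamp(12, 0, 10);        // 10 - capped at the max bound
clamp(-3, 0, 10);        // 0  - raised to the min bound
clamp(5, undefined, 10); // 5  - only the defined bound applies
clamp(7, 0, undefined);  // 7  - no upper bound, so min alone is enforced
```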
package/dist/lib/number.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"number.js","sourceRoot":"","sources":["../../src/lib/number.ts"],"names":[],"mappings":"AAAA;;;GAGG;AACH,MAAM,UAAU,KAAK,CAAC,CAAS,EAAE,GAAY,EAAE,GAAY;IACzD,IAAI,GAAG,GAAG,CAAC,CAAC;IACZ,IAAI,OAAO,GAAG,KAAK,QAAQ,IAAI,GAAG,GAAG,GAAG,EAAE,CAAC;QACzC,GAAG,GAAG,GAAG,CAAC;IACZ,CAAC;IACD,IAAI,OAAO,GAAG,KAAK,QAAQ,IAAI,GAAG,GAAG,GAAG,EAAE,CAAC;QACzC,GAAG,GAAG,GAAG,CAAC;IACZ,CAAC;IACD,OAAO,GAAG,CAAC;AACb,CAAC"}
package/dist/llm_client.d.ts
ADDED
@@ -0,0 +1,14 @@
import { CONFIG } from "./config";
import type { LlmClient, IntentContext } from "./types";
/**
 * Select an LLM client to use for reranking.
 *
 * Implements the client selection logic:
 * 1. If ctx.llm is provided, use it directly
 * 2. Else if GROQ_API_KEY environment variable is set, create default Groq client
 * 3. Else return undefined (caller must handle error)
 *
 * @param ctx - Context object potentially containing an LLM client
 * @returns Selected LLM client, or undefined if none available
 */
export declare function selectLlmClient(ctx: IntentContext, config?: typeof CONFIG): LlmClient | undefined;
package/dist/llm_client.js
ADDED
@@ -0,0 +1,29 @@
import { CONFIG } from "./config";
import { createDefaultGroqClient } from "./providers/groq";
/**
 * Select an LLM client to use for reranking.
 *
 * Implements the client selection logic:
 * 1. If ctx.llm is provided, use it directly
 * 2. Else if GROQ_API_KEY environment variable is set, create default Groq client
 * 3. Else return undefined (caller must handle error)
 *
 * @param ctx - Context object potentially containing an LLM client
 * @returns Selected LLM client, or undefined if none available
 */
export function selectLlmClient(ctx, config = CONFIG) {
    if (ctx.llm) {
        return ctx.llm;
    }
    const groqKey = config.GROQ.API_KEY;
    if (groqKey && groqKey !== "") {
        return createDefaultGroqClient(groqKey, {
            defaults: {
                model: config.GROQ.DEFAULT_MODEL,
                reasoningEffort: config.GROQ.DEFAULT_REASONING_EFFORT,
            },
        });
    }
    return undefined;
}
//# sourceMappingURL=llm_client.js.map
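The selection order implemented above, sketched from a consumer's point of view. The import path and the minimal client shape are assumptions; the real LlmClient and IntentContext types live in dist/types.d.ts.

```typescript
import { selectLlmClient } from "@with-logic/intent/dist/llm_client.js";

// Assumed minimal client stub; the real LlmClient interface is defined in dist/types.d.ts.
const myClient = { call: async () => ({}) };

// 1. An explicit ctx.llm wins and is returned as-is.
const picked = selectLlmClient({ llm: myClient });

// 2. Without ctx.llm, a default Groq client is built when GROQ_API_KEY is set.
// 3. With neither, the result is undefined and the caller must handle it.
const fallback = selectLlmClient({});
if (!fallback) throw new Error("no LLM client available");
```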
package/dist/llm_client.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"llm_client.js","sourceRoot":"","sources":["../src/llm_client.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,MAAM,EAAE,MAAM,UAAU,CAAC;AAClC,OAAO,EAAE,uBAAuB,EAAE,MAAM,kBAAkB,CAAC;AAI3D;;;;;;;;;;GAUG;AACH,MAAM,UAAU,eAAe,CAC7B,GAAkB,EAClB,SAAwB,MAAM;IAE9B,IAAI,GAAG,CAAC,GAAG,EAAE,CAAC;QACZ,OAAO,GAAG,CAAC,GAAG,CAAC;IACjB,CAAC;IACD,MAAM,OAAO,GAAG,MAAM,CAAC,IAAI,CAAC,OAAO,CAAC;IACpC,IAAI,OAAO,IAAI,OAAO,KAAK,EAAE,EAAE,CAAC;QAC9B,OAAO,uBAAuB,CAAC,OAAO,EAAE;YACtC,QAAQ,EAAE;gBACR,KAAK,EAAE,MAAM,CAAC,IAAI,CAAC,aAAa;gBAChC,eAAe,EAAE,MAAM,CAAC,IAAI,CAAC,wBAAwB;aACtD;SACF,CAAC,CAAC;IACL,CAAC;IACD,OAAO,SAAS,CAAC;AACnB,CAAC"}
package/dist/messages.d.ts
ADDED
@@ -0,0 +1,41 @@
import type { ChatMessage, IntentCandidate } from "./types";
/**
 * Build system + user messages instructing the model to score candidates.
 *
 * Constructs a two-message conversation:
 * 1. System message: Defines the scoring task, output format, and constraints
 * 2. User message: Contains the query and candidate items as JSON
 *
 * The system prompt emphasizes using the full configured score range and being decisive
 * about relevance, with strict instructions to return only the score mapping.
 *
 * @param query - The search query or user intent
 * @param candidates - Array of candidates with keys and summaries
 * @returns Array of chat messages ready for LLM consumption
 */
export declare function buildMessages(query: string, candidates: IntentCandidate[], scoreRange: {
    minScore: number;
    maxScore: number;
}): ChatMessage[];
/**
 * Build system + user messages instructing the model to filter candidates.
 *
 * The model must output a JSON object mapping each candidate key to:
 * Example: {"Some key": {"explanation": "...", "isRelevant": true}}.
 *
 * @param query - The search query or user intent
 * @param candidates - Array of candidates with keys and summaries
 * @returns Array of chat messages ready for LLM consumption
 */
export declare function buildFilterMessages(query: string, candidates: IntentCandidate[]): ChatMessage[];
/**
 * Build system + user messages instructing the model to choose exactly one candidate.
 *
 * The model must output a JSON object of the form:
 * Example: {"explanation": "...", "selectedKey": "Some key"}.
 *
 * @param query - The search query or user intent
 * @param candidates - Array of candidates with keys and summaries
 * @returns Array of chat messages ready for LLM consumption
 */
export declare function buildChoiceMessages(query: string, candidates: IntentCandidate[]): ChatMessage[];
package/dist/messages.js
ADDED
@@ -0,0 +1,136 @@
import { jsonStringify } from "./extractors";
/**
 * Build system + user messages instructing the model to score candidates.
 *
 * Constructs a two-message conversation:
 * 1. System message: Defines the scoring task, output format, and constraints
 * 2. User message: Contains the query and candidate items as JSON
 *
 * The system prompt emphasizes using the full configured score range and being decisive
 * about relevance, with strict instructions to return only the score mapping.
 *
 * @param query - The search query or user intent
 * @param candidates - Array of candidates with keys and summaries
 * @returns Array of chat messages ready for LLM consumption
 */
export function buildMessages(query, candidates, scoreRange) {
    const system = `You will receive a JSON blob containing candidate_search_results (each candidate has a key and a short summary) plus a short user request.

Your task is to assess each candidate and return a JSON object that maps candidate keys to objects of the form {"explanation": string, "score": integer} avoiding ambiguity.

The score must be an integer from ${scoreRange.minScore} to ${scoreRange.maxScore}:
- ${scoreRange.minScore} means not relevant at all
- ${scoreRange.maxScore} means highly relevant

Sometimes none are relevant, sometimes all are relevant. Be decisive.

It is okay to return ${scoreRange.minScore} if the candidate is not relevant to the query. It is okay to return ${scoreRange.maxScore} if the candidate is highly relevant to the query. Use the full range of scores.

Every candidate MUST include an explanation. Write the explanation first, then the score. The explanation should be concise (1-3 sentences), concrete, and reference the query intent and the candidate summary.

Write explanations as end-user-facing justifications:
- Do NOT say "the query" or talk about prompt mechanics.
- Write in a direct, item-first voice (e.g., "gpt-5.2 is best here because it specializes in feature implementation and testing.").
- Avoid "I"/"we".

Every key in candidate_search_results must be present in your output mapping. Do not add any keys that are not present in candidate_search_results.
Every key in candidate_search_results must map to an object with:
- explanation: string
- score: integer from ${scoreRange.minScore} to ${scoreRange.maxScore}
Do not, in your generated JSON, include anything other than the \`"{key}": {"explanation": "...", "score": 7}\` mappings. Do not include any other text outside the JSON.

Return a JSON object that matches the enforced JSON schema for response formatting. Use the candidate.key as the property name in the output mapping.

The JSON you return should be of the form: {
    "Key for document 1": { "explanation": "...", "score": ${scoreRange.minScore} },
    "Key for document 2": { "explanation": "...", "score": ${scoreRange.maxScore} },
    ...
}

Pretty-print the JSON for readability.`;
    const payload = {
        query,
        candidate_search_results: candidates.map((c) => ({ key: c.key, summary: c.summary })),
    };
    return [
        { role: "system", content: system },
        { role: "user", content: jsonStringify(payload) },
    ];
}
/**
 * Build system + user messages instructing the model to filter candidates.
 *
 * The model must output a JSON object mapping each candidate key to:
 * Example: {"Some key": {"explanation": "...", "isRelevant": true}}.
 *
 * @param query - The search query or user intent
 * @param candidates - Array of candidates with keys and summaries
 * @returns Array of chat messages ready for LLM consumption
 */
export function buildFilterMessages(query, candidates) {
    const system = `You will receive a JSON blob containing candidate_search_results (each candidate has a key and a short summary) plus a short user request.

Your task is to assess each candidate and return a JSON object that maps candidate keys to objects of the form {"explanation": string, "isRelevant": boolean}.

Return isRelevant=true only when the candidate clearly helps satisfy the query intent. Otherwise return isRelevant=false.

Every candidate MUST include an explanation. Write the explanation first, then the boolean. The explanation should be concise (1-3 sentences), concrete, and reference the query intent and the candidate summary.

Write explanations as end-user-facing justifications:
- Do NOT say "the query" or talk about prompt mechanics.
- Write in a direct, item-first voice.
- Avoid "I"/"we".

Every key in candidate_search_results must be present in your output mapping. Do not add any keys that are not present in candidate_search_results.
Every key in candidate_search_results must map to an object with:
- explanation: string
- isRelevant: boolean
Do not include anything other than the mapping JSON object. Return only JSON matching the enforced schema.

Pretty-print the JSON for readability.`;
    const payload = {
        query,
        candidate_search_results: candidates.map((c) => ({ key: c.key, summary: c.summary })),
    };
    return [
        { role: "system", content: system },
        { role: "user", content: jsonStringify(payload) },
    ];
}
/**
 * Build system + user messages instructing the model to choose exactly one candidate.
 *
 * The model must output a JSON object of the form:
 * Example: {"explanation": "...", "selectedKey": "Some key"}.
 *
 * @param query - The search query or user intent
 * @param candidates - Array of candidates with keys and summaries
 * @returns Array of chat messages ready for LLM consumption
 */
export function buildChoiceMessages(query, candidates) {
    const system = `You will receive a JSON blob containing candidate_search_results (each candidate has a key and a short summary) plus a short user request.

Your task is to choose exactly one candidate as the best match for what the user wants.

You MUST choose one candidate key from the provided list. Do not choose multiple.

Return ONLY JSON of the form: {"explanation": string, "selectedKey": string} where selectedKey is exactly one of the candidate keys. The explanation should be concise (1-3 sentences), concrete, and reference the query intent and the candidate summary.

Write the explanation as an end-user-facing justification:
- Do NOT say "the query" or talk about prompt mechanics.
- Write in a direct, item-first voice.
- Avoid "I"/"we".

Do not include any other text outside the JSON. Return only JSON matching the enforced schema.

Pretty-print the JSON for readability.`;
    const payload = {
        query,
        candidate_search_results: candidates.map((c) => ({ key: c.key, summary: c.summary })),
    };
    return [
        { role: "system", content: system },
        { role: "user", content: jsonStringify(payload) },
    ];
}
//# sourceMappingURL=messages.js.map
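A sketch of how the prompt builders above might be used. The import path, candidate data, and score range are illustrative, not taken from the package.

```typescript
import { buildMessages, buildChoiceMessages } from "@with-logic/intent/dist/messages.js";

// Hypothetical candidates, shown only to illustrate the expected shape.
const candidates = [
  { key: "docs/setup.md", summary: "How to install and configure the CLI." },
  { key: "docs/api.md", summary: "Reference for the public HTTP API." },
];

// Two messages: a system prompt describing the scoring task, and a user
// message carrying { query, candidate_search_results } as JSON.
const scoring = buildMessages("how do I install this?", candidates, {
  minScore: 0,
  maxScore: 10,
});

// Same pattern for single-choice selection.
const choice = buildChoiceMessages("how do I install this?", candidates);
```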
package/dist/messages.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"messages.js","sourceRoot":"","sources":["../src/messages.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,aAAa,EAAE,MAAM,cAAc,CAAC;AAI7C;;;;;;;;;;;;;GAaG;AACH,MAAM,UAAU,aAAa,CAC3B,KAAa,EACb,UAA6B,EAC7B,UAAkD;IAElD,MAAM,MAAM,GAAG;;;;oCAImB,UAAU,CAAC,QAAQ,OAAO,UAAU,CAAC,QAAQ;IAC7E,UAAU,CAAC,QAAQ;IACnB,UAAU,CAAC,QAAQ;;;;uBAIA,UAAU,CAAC,QAAQ,wEAAwE,UAAU,CAAC,QAAQ;;;;;;;;;;;;wBAY7G,UAAU,CAAC,QAAQ,OAAO,UAAU,CAAC,QAAQ;;;;;;6DAMR,UAAU,CAAC,QAAQ;6DACnB,UAAU,CAAC,QAAQ;;;;uCAIzC,CAAC;IAEtC,MAAM,OAAO,GAAG;QACd,KAAK;QACL,wBAAwB,EAAE,UAAU,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,EAAE,GAAG,EAAE,CAAC,CAAC,GAAG,EAAE,OAAO,EAAE,CAAC,CAAC,OAAO,EAAE,CAAC,CAAC;KAC7E,CAAC;IAEX,OAAO;QACL,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,EAAE;QACnC,EAAE,IAAI,EAAE,MAAM,EAAE,OAAO,EAAE,aAAa,CAAC,OAAO,CAAC,EAAE;KAClD,CAAC;AACJ,CAAC;AAED;;;;;;;;;GASG;AACH,MAAM,UAAU,mBAAmB,CAAC,KAAa,EAAE,UAA6B;IAC9E,MAAM,MAAM,GAAG;;;;;;;;;;;;;;;;;;;uCAmBsB,CAAC;IAEtC,MAAM,OAAO,GAAG;QACd,KAAK;QACL,wBAAwB,EAAE,UAAU,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,EAAE,GAAG,EAAE,CAAC,CAAC,GAAG,EAAE,OAAO,EAAE,CAAC,CAAC,OAAO,EAAE,CAAC,CAAC;KAC7E,CAAC;IAEX,OAAO;QACL,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,EAAE;QACnC,EAAE,IAAI,EAAE,MAAM,EAAE,OAAO,EAAE,aAAa,CAAC,OAAO,CAAC,EAAE;KAClD,CAAC;AACJ,CAAC;AAED;;;;;;;;;GASG;AACH,MAAM,UAAU,mBAAmB,CAAC,KAAa,EAAE,UAA6B;IAC9E,MAAM,MAAM,GAAG;;;;;;;;;;;;;;;uCAesB,CAAC;IAEtC,MAAM,OAAO,GAAG;QACd,KAAK;QACL,wBAAwB,EAAE,UAAU,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,EAAE,GAAG,EAAE,CAAC,CAAC,GAAG,EAAE,OAAO,EAAE,CAAC,CAAC,OAAO,EAAE,CAAC,CAAC;KAC7E,CAAC;IAEX,OAAO;QACL,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,EAAE;QACnC,EAAE,IAAI,EAAE,MAAM,EAAE,OAAO,EAAE,aAAa,CAAC,OAAO,CAAC,EAAE;KAClD,CAAC;AACJ,CAAC"}
package/dist/providers/groq.d.ts
ADDED
@@ -0,0 +1,84 @@
import type { JSONObject, LlmClient } from "../types";
import type { ChatCompletionMessageParam } from "groq-sdk/resources/chat/completions";
type GroqTimeoutOptions = {
    timeout?: number;
};
type GroqJsonSchemaResponseFormat = {
    type: "json_schema";
    json_schema: {
        name: string;
        schema: JSONObject;
        strict: true;
    };
};
type GroqChatCompletionRequest = {
    model: string;
    reasoning_effort: "low" | "medium" | "high";
    messages: ChatCompletionMessageParam[];
    user?: string;
    response_format: GroqJsonSchemaResponseFormat;
};
type GroqChatCompletionResponse = {
    choices: Array<{
        message?: {
            content?: string | null;
        };
    }>;
};
/**
 * Create a default Groq LLM client with retry logic.
 *
 * Returns an LlmClient implementation that uses the Groq SDK with:
 * - Strict JSON schema enforcement via response_format
 * - Automatic retry on schema validation failures (up to 3 attempts)
 * - Support for custom model, reasoning effort, timeout, and user ID
 *
 * @param apiKey - Groq API key
 * @returns LlmClient implementation for Groq
 *
 * @example
 * ```typescript
 * import { CONFIG } from "../config";
 * const client = createDefaultGroqClient(CONFIG.GROQ.API_KEY);
 * const result = await client.call(messages, schema, { model: "llama-3.3-70b" });
 * ```
 */
export type GroqSdkLike = {
    chat: {
        completions: {
            create: (req: GroqChatCompletionRequest, opts?: GroqTimeoutOptions) => Promise<GroqChatCompletionResponse>;
        };
    };
};
/**
 * Create a GroqSdkLike wrapper around groq-sdk.
 *
 * This keeps our provider surface strongly typed while isolating groq-sdk's
 * broader request/response types to a single boundary.
 *
 * @param apiKey - Groq API key
 * @returns GroqSdkLike wrapper
 * @private
 */
export declare function createGroqSdkLike(apiKey: string): GroqSdkLike;
/**
 * Create the underlying Groq SDK client.
 *
 * This wrapper exists to make the default SDK construction path unit-testable.
 *
 * @param options - Groq SDK constructor options
 * @returns Groq SDK client
 * @private
 */
export declare function createGroqSdk(options: {
    apiKey: string;
}): unknown;
export declare function createDefaultGroqClient(apiKey: string, options?: {
    defaults?: {
        model?: string;
        reasoningEffort?: "low" | "medium" | "high";
    };
    makeSdk?: (apiKey: string) => GroqSdkLike;
    jsonRepairAttempts?: number;
}): LlmClient;
export {};
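A sketch of the makeSdk injection point declared above, for example to stub the Groq SDK in tests. The import path and the canned response content are assumptions; the fake follows the GroqSdkLike shape from this file, and the model string simply mirrors the package's own @example.

```typescript
import { createDefaultGroqClient } from "@with-logic/intent/dist/providers/groq.js";

// Fake SDK matching GroqSdkLike: chat.completions.create resolves to a
// response whose first choice carries the JSON payload as message content.
const fakeSdk = {
  chat: {
    completions: {
      create: async () => ({
        choices: [{ message: { content: '{"some key": {"explanation": "...", "score": 7}}' } }],
      }),
    },
  },
};

const client = createDefaultGroqClient("test-key", {
  makeSdk: () => fakeSdk,
  defaults: { model: "llama-3.3-70b", reasoningEffort: "low" },
});
```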