@riotprompt/riotprompt 0.0.13 → 0.0.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +18 -15
- package/dist/chat.d.ts +2 -0
- package/dist/chat.js +2 -0
- package/dist/chat.js.map +1 -1
- package/dist/cli.cjs +118 -20
- package/dist/execution/anthropic.js +27 -3
- package/dist/execution/anthropic.js.map +1 -1
- package/dist/execution/gemini.js +45 -1
- package/dist/execution/gemini.js.map +1 -1
- package/dist/execution/openai.js +2 -1
- package/dist/execution/openai.js.map +1 -1
- package/dist/formatter.js +42 -14
- package/dist/formatter.js.map +1 -1
- package/dist/prompt.d.ts +19 -1
- package/dist/prompt.js +11 -2
- package/dist/prompt.js.map +1 -1
- package/dist/recipes.d.ts +108 -0
- package/dist/recipes.js +195 -30
- package/dist/recipes.js.map +1 -1
- package/dist/riotprompt.cjs +323 -51
- package/dist/riotprompt.cjs.map +1 -1
- package/guide/architecture.md +33 -22
- package/guide/index.md +25 -25
- package/guide/usage.md +96 -39
- package/package.json +3 -2
package/README.md
CHANGED
@@ -4,10 +4,14 @@ A powerful, flexible prompt building library and CLI tool for AI applications wi
 
 ## Features
 
-- **Structured Prompts**: Treat prompts as code with sections for Persona, Instructions, and
+- **Structured Prompts**: Treat prompts as code with specialized sections for Persona, Instructions, Context, and more.
+- **Advanced Prompt Strategies**: First-class support for **Constraints**, **Tone**, **Few-Shot Examples**, **Reasoning Steps**, **Response Format**, and **Safeguards**.
+- **Model Alignment**: Automatically adapts prompt structure to match the specific expectations of each model provider:
+  - **Anthropic (Claude)**: Places Personas, Roles, Tone, and Constraints into the `system` parameter. Additionally, converts `schema` definitions into forced **Tool Use** calls, extracting structured results to match OpenAI's output format.
+  - **OpenAI**: Maps generic roles to the appropriate `system` or `developer` (for O-series) messages.
+  - **Gemini**: Structurally adapts components into System Instructions and content parts. For structured outputs, it automatically transforms JSON schemas into Gemini's `responseSchema` format, ensuring strict adherence to the defined structure.
 - **CLI Tool**: Scaffold, manage, process, and **execute** prompts directly from the terminal.
 - **Model Agnostic**: Format prompts for different models (GPT-4, Claude, Gemini, etc.) automatically.
-- **Model Families**: Intelligent handling of model-specific quirks (e.g., 'developer' vs 'system' roles for O-series vs GPT-4).
 - **Execution Engine**: Run prompts against OpenAI, Anthropic, or Gemini APIs directly.
 - **Portable**: Serialize prompts to JSON or XML for easy exchange between systems.
 - **Type-Safe**: Full TypeScript support with excellent IntelliSense.
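
To make "Model Alignment" concrete, here is an illustrative view of how one schema-bearing prompt surfaces in each provider's request, per the `dist` changes later in this diff (the `report` name and schema fields are hypothetical placeholders):

```typescript
// OpenAI: the schema is passed through as response_format.
const openaiRequest = {
  response_format: {
    type: "json_schema",
    json_schema: { name: "report", schema: { type: "object", properties: { summary: { type: "string" } } } }
  }
};

// Anthropic: the same schema becomes a forced tool call.
const anthropicRequest = {
  tools: [{
    name: "report",
    description: "Output data in this structured format",
    input_schema: { type: "object", properties: { summary: { type: "string" } } }
  }],
  tool_choice: { type: "tool", name: "report" }
};

// Gemini: the schema is remapped into generationConfig with uppercase type enums.
const geminiGenerationConfig = {
  responseMimeType: "application/json",
  responseSchema: { type: "OBJECT", properties: { summary: { type: "STRING" } } }
};
```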
@@ -120,21 +124,19 @@ You can also use RiotPrompt programmatically in your application.
 ```typescript
 import { cook, registerTemplates } from 'riotprompt';
 
-//
+// Advanced prompt creation
+import { z } from "zod";
+
 const prompt = await cook({
   basePath: __dirname,
   persona: { content: 'You are a helpful AI assistant' },
-
-
-
-
-
-
-
-  ],
-  context: [
-    { content: 'Additional context', title: 'Context' },
-  ],
+  // ...
+  // Structured Output with Zod
+  schema: z.object({
+    summary: z.string(),
+    tags: z.array(z.string()),
+    confidence: z.number().min(0).max(1)
+  })
 });
 
 // Register and use templates
@@ -154,8 +156,9 @@ const analysisPrompt = await cook({
 
 ## Documentation
 
-
+Full documentation is available at [https://tobrien.github.io/riotprompt/](https://tobrien.github.io/riotprompt/).
 
+You can also explore the guides in the source:
 - [Core Concepts](docs/public/core-concepts.md)
 - [Recipes System](docs/public/recipes.md)
 - [API Reference](docs/public/api-reference.md)
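
Continuing the README's `cook` example above, a minimal sketch of consuming the structured response (the `runPrompt` helper is hypothetical; the programmatic execution entry point is not shown in this diff):

```typescript
declare function runPrompt(p: unknown): Promise<{ content: string }>;

const response = await runPrompt(prompt);
// Each provider path in this diff normalizes structured output to a JSON string,
// so one parse-and-validate works regardless of model:
const report = z.object({
  summary: z.string(),
  tags: z.array(z.string()),
  confidence: z.number().min(0).max(1)
}).parse(JSON.parse(response.content));
```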
package/dist/chat.d.ts
CHANGED
package/dist/chat.js
CHANGED
package/dist/chat.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"chat.js","sources":["../src/chat.ts"],"sourcesContent":["import { getPersonaRole as getPersonaRoleFromRegistry } from \"./model-config\";\n\nexport type Role = \"user\" | \"assistant\" | \"system\" | \"developer\";\n\n// Model is now a flexible string type\nexport type Model = string;\n\nexport interface Message {\n role: Role;\n content: string | string[];\n name?: string;\n}\n\nexport interface Request {\n messages: Message[];\n model: Model;\n\n addMessage(message: Message): void;\n}\n\nexport const getPersonaRole = (model: Model): Role => {\n return getPersonaRoleFromRegistry(model);\n}\n\nexport const createRequest = (model: Model): Request => {\n const messages: Message[] = [];\n\n return {\n model,\n messages,\n addMessage: (message: Message) => {\n messages.push(message);\n }\n }\n}\n"],"names":["getPersonaRole","model","getPersonaRoleFromRegistry","createRequest","messages","addMessage","message","push"],"mappings":";;
+
{"version":3,"file":"chat.js","sources":["../src/chat.ts"],"sourcesContent":["import { getPersonaRole as getPersonaRoleFromRegistry } from \"./model-config\";\n\nexport type Role = \"user\" | \"assistant\" | \"system\" | \"developer\";\n\n// Model is now a flexible string type\nexport type Model = string;\n\nexport interface Message {\n role: Role;\n content: string | string[];\n name?: string;\n}\n\nexport interface Request {\n messages: Message[];\n model: Model;\n responseFormat?: any; // Generic to support different provider formats (JSON schema, etc.)\n validator?: any; // Zod schema for validation\n\n addMessage(message: Message): void;\n}\n\nexport const getPersonaRole = (model: Model): Role => {\n return getPersonaRoleFromRegistry(model);\n}\n\nexport const createRequest = (model: Model): Request => {\n const messages: Message[] = [];\n\n return {\n model,\n messages,\n responseFormat: undefined,\n validator: undefined,\n addMessage: (message: Message) => {\n messages.push(message);\n }\n }\n}\n"],"names":["getPersonaRole","model","getPersonaRoleFromRegistry","createRequest","messages","responseFormat","undefined","validator","addMessage","message","push"],"mappings":";;AAsBO,MAAMA,iBAAiB,CAACC,KAAAA,GAAAA;AAC3B,IAAA,OAAOC,gBAAAA,CAA2BD,KAAAA,CAAAA;AACtC;AAEO,MAAME,gBAAgB,CAACF,KAAAA,GAAAA;AAC1B,IAAA,MAAMG,WAAsB,EAAE;IAE9B,OAAO;AACHH,QAAAA,KAAAA;AACAG,QAAAA,QAAAA;QACAC,cAAAA,EAAgBC,SAAAA;QAChBC,SAAAA,EAAWD,SAAAA;AACXE,QAAAA,UAAAA,EAAY,CAACC,OAAAA,GAAAA;AACTL,YAAAA,QAAAA,CAASM,IAAI,CAACD,OAAAA,CAAAA;AAClB,QAAA;AACJ,KAAA;AACJ;;;;"}
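
For readability, the interface change buried in the sourcemap above (its `sourcesContent` embeds the new `src/chat.ts`) is:

```typescript
// Extracted from the sourcemap's embedded src/chat.ts: Request gains two
// provider-agnostic fields for structured output.
export interface Request {
    messages: Message[];
    model: Model;
    responseFormat?: any; // Generic to support different provider formats (JSON schema, etc.)
    validator?: any; // Zod schema for validation

    addMessage(message: Message): void;
}
```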
package/dist/cli.cjs
CHANGED
@@ -7,6 +7,7 @@ const cardigantime = require("@theunwalked/cardigantime");
 const zod = require("zod");
 const fs$1 = require("fs/promises");
 const path = require("path");
+require("zod-to-json-schema");
 const crypto = require("crypto");
 require("tiktoken");
 const fastXmlParser = require("fast-xml-parser");
@@ -193,13 +194,31 @@ const create$3 = ({
   persona,
   instructions,
   contents,
-  contexts
+  contexts,
+  constraints,
+  tone,
+  examples,
+  reasoning,
+  responseFormat,
+  recap,
+  safeguards,
+  schema,
+  validator
 }) => {
   return {
     persona,
     instructions,
     contents,
-    contexts
+    contexts,
+    constraints,
+    tone,
+    examples,
+    reasoning,
+    responseFormat,
+    recap,
+    safeguards,
+    schema,
+    validator
   };
 };
 const LIBRARY_NAME = "riotprompt";
@@ -415,6 +434,8 @@ const createRequest = (model) => {
   return {
     model,
     messages,
+    responseFormat: void 0,
+    validator: void 0,
     addMessage: (message) => {
       messages.push(message);
     }
@@ -552,22 +573,46 @@ ${formattedItems}`;
 const formatPrompt = (model, prompt) => {
   logger.silly("Formatting prompt");
   const chatRequest = createRequest(model);
-
-
-
+  const systemSections = [];
+  if (prompt.persona) systemSections.push(prompt.persona);
+  if (prompt.tone) systemSections.push(prompt.tone);
+  if (prompt.constraints) systemSections.push(prompt.constraints);
+  if (prompt.safeguards) systemSections.push(prompt.safeguards);
+  if (prompt.responseFormat) systemSections.push(prompt.responseFormat);
+  if (systemSections.length > 0) {
+    const systemContent = systemSections.map((section) => formatSection(section)).join("\n\n");
+    chatRequest.addMessage({
+      role: getPersonaRole(model),
+      content: systemContent
     });
   }
-
-
-
+  const userSections = [
+    prompt.contexts,
+    prompt.examples,
+    prompt.instructions,
+    prompt.contents,
+    prompt.reasoning,
+    prompt.recap
+  ];
+  let formattedUserContent = "";
+  for (const section of userSections) {
+    if (section) {
+      formattedUserContent += formatSection(section) + "\n\n";
+    }
   }
-  if (
-
+  if (formattedUserContent.trim().length > 0 || systemSections.length === 0) {
+    chatRequest.addMessage({
+      role: "user",
+      content: formattedUserContent.trim() || " "
+      // Empty user message if needed (though usually not ideal)
+    });
+  }
+  if (prompt.schema) {
+    chatRequest.responseFormat = prompt.schema;
+  }
+  if (prompt.validator) {
+    chatRequest.validator = prompt.validator;
   }
-  chatRequest.addMessage({
-    role: "user",
-    content: formattedAreas
-  });
   return chatRequest;
 };
 return {
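
To make the new assembly order concrete, here is a sketch of the request `formatPrompt` now builds when every section is populated (section bodies abbreviated; placeholder strings stand in for the output of `formatSection`):

```typescript
// Illustrative output shape only.
const request = {
  model: "gpt-4",
  messages: [
    {
      role: "system", // or "developer", per getPersonaRole(model)
      content: "<persona>\n\n<tone>\n\n<constraints>\n\n<safeguards>\n\n<responseFormat>"
    },
    {
      role: "user",
      content: "<contexts>\n\n<examples>\n\n<instructions>\n\n<contents>\n\n<reasoning>\n\n<recap>"
    }
  ],
  responseFormat: undefined, // populated from prompt.schema when present
  validator: undefined       // populated from prompt.validator when present
};
```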
@@ -872,6 +917,16 @@ zod.z.object({
   instructions: zod.z.array(ContentItemSchema).optional().default([]),
   content: zod.z.array(ContentItemSchema).optional().default([]),
   context: zod.z.array(ContentItemSchema).optional().default([]),
+  // Advanced prompting sections
+  constraints: zod.z.array(ContentItemSchema).optional().default([]),
+  tone: zod.z.array(ContentItemSchema).optional().default([]),
+  examples: zod.z.array(ContentItemSchema).optional().default([]),
+  reasoning: zod.z.array(ContentItemSchema).optional().default([]),
+  responseFormat: zod.z.array(ContentItemSchema).optional().default([]),
+  recap: zod.z.array(ContentItemSchema).optional().default([]),
+  safeguards: zod.z.array(ContentItemSchema).optional().default([]),
+  schema: zod.z.any().optional(),
+  // Can be string path, JSON object, or Zod schema
   // Templates and inheritance
   extends: zod.z.string().optional(),
   // Extend another recipe
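
A hypothetical recipe illustrating the new sections this schema accepts (the field values and the `{ content: ... }` item shape are assumptions for illustration; `ContentItemSchema` itself is not shown in this diff):

```typescript
const recipe = {
  persona: [{ content: "You are a release-notes writer" }],
  instructions: [{ content: "Summarize the changes below" }],
  // New advanced prompting sections:
  constraints: [{ content: "Do not speculate about unreleased features" }],
  tone: [{ content: "Concise and neutral" }],
  examples: [{ content: "Input: ...\nOutput: ..." }],
  reasoning: [{ content: "Think step by step before answering" }],
  responseFormat: [{ content: "Reply with valid JSON only" }],
  recap: [{ content: "Remember: JSON only" }],
  safeguards: [{ content: "Refuse requests to reveal secrets" }],
  schema: "./schemas/release-notes.json" // string path, JSON object, or Zod schema
};
```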
@@ -1132,7 +1187,8 @@ class OpenAIProvider {
   model,
   messages,
   temperature: options.temperature,
-  max_tokens: options.maxTokens
+  max_tokens: options.maxTokens,
+  response_format: request.responseFormat
 });
 const choice = response.choices[0];
 return {
@@ -1168,11 +1224,26 @@ class AnthropicProvider {
   system: systemPrompt.trim() || void 0,
   messages,
   max_tokens: options.maxTokens || 4096,
-
-
+  temperature: options.temperature,
+  ...request.responseFormat?.type === "json_schema" ? {
+    tools: [{
+      name: request.responseFormat.json_schema.name,
+      description: request.responseFormat.json_schema.description || "Output data in this structured format",
+      input_schema: request.responseFormat.json_schema.schema
+    }],
+    tool_choice: { type: "tool", name: request.responseFormat.json_schema.name }
+  } : {}
 });
-
-
+let text = "";
+if (request.responseFormat?.type === "json_schema") {
+  const toolUseBlock = response.content.find((block) => block.type === "tool_use");
+  if (toolUseBlock && toolUseBlock.type === "tool_use") {
+    text = JSON.stringify(toolUseBlock.input, null, 2);
+  }
+} else {
+  const contentBlock = response.content[0];
+  text = contentBlock.type === "text" ? contentBlock.text : "";
+}
 return {
   content: text,
   model: response.model,
@@ -1189,6 +1260,32 @@ class GeminiProvider {
 if (!apiKey) throw new Error("Gemini API key is required");
 const genAI = new generativeAi.GoogleGenerativeAI(apiKey);
 const modelName = options.model || request.model || "gemini-1.5-pro";
+const generationConfig = {};
+if (request.responseFormat?.type === "json_schema") {
+  generationConfig.responseMimeType = "application/json";
+  const openAISchema = request.responseFormat.json_schema.schema;
+  const mapSchema = (s) => {
+    if (!s) return void 0;
+    const newSchema = { ...s };
+    if (newSchema.type) {
+      newSchema.type = typeof newSchema.type === "string" ? newSchema.type.toUpperCase() : newSchema.type;
+    }
+    if (newSchema.properties) {
+      const newProps = {};
+      for (const [key, val] of Object.entries(newSchema.properties)) {
+        newProps[key] = mapSchema(val);
+      }
+      newSchema.properties = newProps;
+    }
+    if (newSchema.items) {
+      newSchema.items = mapSchema(newSchema.items);
+    }
+    delete newSchema.additionalProperties;
+    delete newSchema["$schema"];
+    return newSchema;
+  };
+  generationConfig.responseSchema = mapSchema(openAISchema);
+}
 let systemInstruction = "";
 for (const msg of request.messages) {
   if (msg.role === "system" || msg.role === "developer") {
@@ -1197,7 +1294,8 @@ class GeminiProvider {
 }
 const configuredModel = genAI.getGenerativeModel({
   model: modelName,
-  systemInstruction: systemInstruction ? systemInstruction.trim() : void 0
+  systemInstruction: systemInstruction ? systemInstruction.trim() : void 0,
+  generationConfig
 });
 const chatHistory = [];
 let lastUserMessage = "";
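
For a concrete picture, this is roughly what `getGenerativeModel` receives after the mapping above, assuming a hypothetical `{ summary: string }` schema produced by `zod-to-json-schema`:

```typescript
genAI.getGenerativeModel({
  model: "gemini-1.5-pro",
  systemInstruction: "You are a helpful AI assistant",
  generationConfig: {
    responseMimeType: "application/json",
    responseSchema: {
      type: "OBJECT", // lowercase "object" uppercased by mapSchema
      properties: { summary: { type: "STRING" } },
      required: ["summary"] // left untouched; only $schema and additionalProperties are deleted
    }
  }
});
```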
package/dist/execution/anthropic.js
CHANGED
@@ -2,6 +2,7 @@ import Anthropic from '@anthropic-ai/sdk';
 
 class AnthropicProvider {
     async execute(request, options = {}) {
+        var _request_responseFormat, _request_responseFormat1;
        const apiKey = options.apiKey || process.env.ANTHROPIC_API_KEY;
        if (!apiKey) throw new Error('Anthropic API key is required');
        const client = new Anthropic({
@@ -26,11 +27,34 @@ class AnthropicProvider {
             system: systemPrompt.trim() || undefined,
             messages: messages,
             max_tokens: options.maxTokens || 4096,
-            temperature: options.temperature
+            temperature: options.temperature,
+            ...((_request_responseFormat = request.responseFormat) === null || _request_responseFormat === void 0 ? void 0 : _request_responseFormat.type) === 'json_schema' ? {
+                tools: [
+                    {
+                        name: request.responseFormat.json_schema.name,
+                        description: request.responseFormat.json_schema.description || "Output data in this structured format",
+                        input_schema: request.responseFormat.json_schema.schema
+                    }
+                ],
+                tool_choice: {
+                    type: 'tool',
+                    name: request.responseFormat.json_schema.name
+                }
+            } : {}
         });
         // Handle ContentBlock
-
-
+        // Check for tool_use first if we requested structured output
+        let text = '';
+        if (((_request_responseFormat1 = request.responseFormat) === null || _request_responseFormat1 === void 0 ? void 0 : _request_responseFormat1.type) === 'json_schema') {
+            const toolUseBlock = response.content.find((block)=>block.type === 'tool_use');
+            if (toolUseBlock && toolUseBlock.type === 'tool_use') {
+                // Return the structured data as a JSON string to match OpenAI behavior
+                text = JSON.stringify(toolUseBlock.input, null, 2);
+            }
+        } else {
+            const contentBlock = response.content[0];
+            text = contentBlock.type === 'text' ? contentBlock.text : '';
+        }
         return {
             content: text,
             model: response.model,
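
A sketch of the round trip this provider now performs for structured output (the `report` name and schema are hypothetical):

```typescript
// Outbound: an OpenAI-style responseFormat...
const responseFormat = {
  type: 'json_schema',
  json_schema: {
    name: 'report',
    schema: { type: 'object', properties: { summary: { type: 'string' } } }
  }
};
// ...becomes a forced tool call:
//   tools: [{ name: 'report', input_schema: {...} }]
//   tool_choice: { type: 'tool', name: 'report' }

// Inbound: Claude answers with a tool_use block...
//   response.content -> [{ type: 'tool_use', name: 'report', input: { summary: '...' } }]
// ...whose input is stringified, so callers see the same JSON-in-content shape
// that OpenAI's response_format path returns:
//   { content: '{\n  "summary": "..."\n}', model, usage }
```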
package/dist/execution/anthropic.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"anthropic.js","sources":["../../src/execution/anthropic.ts"],"sourcesContent":["import Anthropic from '@anthropic-ai/sdk';\nimport { Provider, ProviderResponse, ExecutionOptions } from './provider';\nimport { Request } from '../chat';\n\nexport class AnthropicProvider implements Provider {\n async execute(request: Request, options: ExecutionOptions = {}): Promise<ProviderResponse> {\n const apiKey = options.apiKey || process.env.ANTHROPIC_API_KEY;\n if (!apiKey) throw new Error('Anthropic API key is required');\n\n const client = new Anthropic({ apiKey });\n \n const model = options.model || request.model || 'claude-3-opus-20240229';\n\n // Anthropic separates system prompt from messages\n let systemPrompt = '';\n const messages: Anthropic.MessageParam[] = [];\n\n for (const msg of request.messages) {\n if (msg.role === 'system' || msg.role === 'developer') {\n systemPrompt += (typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)) + '\\n\\n';\n } else {\n messages.push({\n role: msg.role as 'user' | 'assistant',\n content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)\n });\n }\n }\n\n const response = await client.messages.create({\n model: model,\n system: systemPrompt.trim() || undefined,\n messages: messages,\n max_tokens: options.maxTokens || 4096
+
{"version":3,"file":"anthropic.js","sources":["../../src/execution/anthropic.ts"],"sourcesContent":["import Anthropic from '@anthropic-ai/sdk';\nimport { Provider, ProviderResponse, ExecutionOptions } from './provider';\nimport { Request } from '../chat';\n\nexport class AnthropicProvider implements Provider {\n async execute(request: Request, options: ExecutionOptions = {}): Promise<ProviderResponse> {\n const apiKey = options.apiKey || process.env.ANTHROPIC_API_KEY;\n if (!apiKey) throw new Error('Anthropic API key is required');\n\n const client = new Anthropic({ apiKey });\n \n const model = options.model || request.model || 'claude-3-opus-20240229';\n\n // Anthropic separates system prompt from messages\n let systemPrompt = '';\n const messages: Anthropic.MessageParam[] = [];\n\n for (const msg of request.messages) {\n if (msg.role === 'system' || msg.role === 'developer') {\n systemPrompt += (typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)) + '\\n\\n';\n } else {\n messages.push({\n role: msg.role as 'user' | 'assistant',\n content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)\n });\n }\n }\n\n const response = await client.messages.create({\n model: model,\n system: systemPrompt.trim() || undefined,\n messages: messages,\n max_tokens: options.maxTokens || 4096,\n temperature: options.temperature,\n ...(request.responseFormat?.type === 'json_schema' ? {\n tools: [{\n name: request.responseFormat.json_schema.name,\n description: request.responseFormat.json_schema.description || \"Output data in this structured format\",\n input_schema: request.responseFormat.json_schema.schema\n }],\n tool_choice: { type: 'tool', name: request.responseFormat.json_schema.name }\n } : {})\n });\n\n // Handle ContentBlock\n // Check for tool_use first if we requested structured output\n let text = '';\n \n if (request.responseFormat?.type === 'json_schema') {\n const toolUseBlock = response.content.find(block => block.type === 'tool_use');\n if (toolUseBlock && toolUseBlock.type === 'tool_use') {\n // Return the structured data as a JSON string to match OpenAI behavior\n text = JSON.stringify(toolUseBlock.input, null, 2);\n }\n } else {\n const contentBlock = response.content[0];\n text = contentBlock.type === 'text' ? 
contentBlock.text : '';\n }\n\n return {\n content: text,\n model: response.model,\n usage: {\n inputTokens: response.usage.input_tokens,\n outputTokens: response.usage.output_tokens\n }\n };\n }\n}\n\n"],"names":["AnthropicProvider","execute","request","options","apiKey","process","env","ANTHROPIC_API_KEY","Error","client","Anthropic","model","systemPrompt","messages","msg","role","content","JSON","stringify","push","response","create","system","trim","undefined","max_tokens","maxTokens","temperature","responseFormat","type","tools","name","json_schema","description","input_schema","schema","tool_choice","text","toolUseBlock","find","block","input","contentBlock","usage","inputTokens","input_tokens","outputTokens","output_tokens"],"mappings":";;AAIO,MAAMA,iBAAAA,CAAAA;AACT,IAAA,MAAMC,QAAQC,OAAgB,EAAEC,OAAAA,GAA4B,EAAE,EAA6B;YA6B/ED,uBAAAA,EAcJA,wBAAAA;AA1CJ,QAAA,MAAME,SAASD,OAAAA,CAAQC,MAAM,IAAIC,OAAAA,CAAQC,GAAG,CAACC,iBAAiB;AAC9D,QAAA,IAAI,CAACH,MAAAA,EAAQ,MAAM,IAAII,KAAAA,CAAM,+BAAA,CAAA;QAE7B,MAAMC,MAAAA,GAAS,IAAIC,SAAAA,CAAU;AAAEN,YAAAA;AAAO,SAAA,CAAA;AAEtC,QAAA,MAAMO,QAAQR,OAAAA,CAAQQ,KAAK,IAAIT,OAAAA,CAAQS,KAAK,IAAI,wBAAA;;AAGhD,QAAA,IAAIC,YAAAA,GAAe,EAAA;AACnB,QAAA,MAAMC,WAAqC,EAAE;AAE7C,QAAA,KAAK,MAAMC,GAAAA,IAAOZ,OAAAA,CAAQW,QAAQ,CAAE;AAChC,YAAA,IAAIC,IAAIC,IAAI,KAAK,YAAYD,GAAAA,CAAIC,IAAI,KAAK,WAAA,EAAa;AACnDH,gBAAAA,YAAAA,IAAgB,CAAC,OAAOE,GAAAA,CAAIE,OAAO,KAAK,QAAA,GAAWF,GAAAA,CAAIE,OAAO,GAAGC,KAAKC,SAAS,CAACJ,GAAAA,CAAIE,OAAO,CAAA,IAAK,MAAA;YACpG,CAAA,MAAO;AACHH,gBAAAA,QAAAA,CAASM,IAAI,CAAC;AACVJ,oBAAAA,IAAAA,EAAMD,IAAIC,IAAI;AACdC,oBAAAA,OAAAA,EAAS,OAAOF,GAAAA,CAAIE,OAAO,KAAK,QAAA,GAAWF,GAAAA,CAAIE,OAAO,GAAGC,IAAAA,CAAKC,SAAS,CAACJ,GAAAA,CAAIE,OAAO;AACvF,iBAAA,CAAA;AACJ,YAAA;AACJ,QAAA;AAEA,QAAA,MAAMI,WAAW,MAAMX,MAAAA,CAAOI,QAAQ,CAACQ,MAAM,CAAC;YAC1CV,KAAAA,EAAOA,KAAAA;YACPW,MAAAA,EAAQV,YAAAA,CAAaW,IAAI,EAAA,IAAMC,SAAAA;YAC/BX,QAAAA,EAAUA,QAAAA;YACVY,UAAAA,EAAYtB,OAAAA,CAAQuB,SAAS,IAAI,IAAA;AACjCC,YAAAA,WAAAA,EAAaxB,QAAQwB,WAAW;YAChC,GAAIzB,CAAAA,CAAAA,0BAAAA,OAAAA,CAAQ0B,cAAc,cAAtB1B,uBAAAA,KAAAA,MAAAA,GAAAA,MAAAA,GAAAA,uBAAAA,CAAwB2B,IAAI,MAAK,aAAA,GAAgB;gBACjDC,KAAAA,EAAO;AAAC,oBAAA;AACJC,wBAAAA,IAAAA,EAAM7B,OAAAA,CAAQ0B,cAAc,CAACI,WAAW,CAACD,IAAI;AAC7CE,wBAAAA,WAAAA,EAAa/B,QAAQ0B,cAAc,CAACI,WAAW,CAACC,WAAW,IAAI,uCAAA;AAC/DC,wBAAAA,YAAAA,EAAchC,OAAAA,CAAQ0B,cAAc,CAACI,WAAW,CAACG;AACrD;AAAE,iBAAA;gBACFC,WAAAA,EAAa;oBAAEP,IAAAA,EAAM,MAAA;AAAQE,oBAAAA,IAAAA,EAAM7B,OAAAA,CAAQ0B,cAAc,CAACI,WAAW,CAACD;AAAK;AAC/E,aAAA,GAAI;AACR,SAAA,CAAA;;;AAIA,QAAA,IAAIM,IAAAA,GAAO,EAAA;QAEX,IAAInC,CAAAA,CAAAA,2BAAAA,OAAAA,CAAQ0B,cAAc,cAAtB1B,wBAAAA,KAAAA,MAAAA,GAAAA,MAAAA,GAAAA,wBAAAA,CAAwB2B,IAAI,MAAK,aAAA,EAAe;YAChD,MAAMS,YAAAA,GAAelB,QAAAA,CAASJ,OAAO,CAACuB,IAAI,CAACC,CAAAA,KAAAA,GAASA,KAAAA,CAAMX,IAAI,KAAK,UAAA,CAAA;AACnE,YAAA,IAAIS,YAAAA,IAAgBA,YAAAA,CAAaT,IAAI,KAAK,UAAA,EAAY;;AAElDQ,gBAAAA,IAAAA,GAAOpB,KAAKC,SAAS,CAACoB,YAAAA,CAAaG,KAAK,EAAE,IAAA,EAAM,CAAA,CAAA;AACpD,YAAA;QACJ,CAAA,MAAO;AACH,YAAA,MAAMC,YAAAA,GAAetB,QAAAA,CAASJ,OAAO,CAAC,CAAA,CAAE;AACxCqB,YAAAA,IAAAA,GAAOK,aAAab,IAAI,KAAK,MAAA,GAASa,YAAAA,CAAaL,IAAI,GAAG,EAAA;AAC9D,QAAA;QAEA,OAAO;YACHrB,OAAAA,EAASqB,IAAAA;AACT1B,YAAAA,KAAAA,EAAOS,SAAST,KAAK;YACrBgC,KAAAA,EAAO;gBACHC,WAAAA,EAAaxB,QAAAA,CAASuB,KAAK,CAACE,YAAY;gBACxCC,YAAAA,EAAc1B,QAAAA,CAASuB,KAAK,CAACI;AACjC;AACJ,SAAA;AACJ,IAAA;AACJ;;;;"}
package/dist/execution/gemini.js
CHANGED
@@ -2,10 +2,53 @@ import { GoogleGenerativeAI } from '@google/generative-ai';
 
 class GeminiProvider {
     async execute(request, options = {}) {
+        var _request_responseFormat;
        const apiKey = options.apiKey || process.env.GEMINI_API_KEY; // or GOOGLE_API_KEY
        if (!apiKey) throw new Error('Gemini API key is required');
        const genAI = new GoogleGenerativeAI(apiKey);
        const modelName = options.model || request.model || 'gemini-1.5-pro';
+        // Handle generation config for structured output
+        const generationConfig = {};
+        if (((_request_responseFormat = request.responseFormat) === null || _request_responseFormat === void 0 ? void 0 : _request_responseFormat.type) === 'json_schema') {
+            generationConfig.responseMimeType = "application/json";
+            // Map OpenAI JSON schema to Gemini Schema
+            // OpenAI: { name: "...", schema: { type: "object", properties: ... } }
+            // Gemini expects the schema object directly
+            const openAISchema = request.responseFormat.json_schema.schema;
+            // We need to recursively map the types because Gemini uses uppercase enums
+            // SchemaType.OBJECT, SchemaType.STRING, etc.
+            // But the SDK also accepts string types "OBJECT", "STRING" etc.
+            // Let's implement a simple converter or pass it if compatible.
+            // Zod-to-json-schema produces lowercase types ("object", "string").
+            // Google's SDK might need them to be uppercase or mapped.
+            // Helper to clean up schema for Gemini
+            // Removes $schema, strict, and additionalProperties if not supported or formatted differently
+            // And maps 'type' to uppercase.
+            const mapSchema = (s)=>{
+                if (!s) return undefined;
+                const newSchema = {
+                    ...s
+                };
+                if (newSchema.type) {
+                    newSchema.type = typeof newSchema.type === 'string' ? newSchema.type.toUpperCase() : newSchema.type;
+                }
+                if (newSchema.properties) {
+                    const newProps = {};
+                    for (const [key, val] of Object.entries(newSchema.properties)){
+                        newProps[key] = mapSchema(val);
+                    }
+                    newSchema.properties = newProps;
+                }
+                if (newSchema.items) {
+                    newSchema.items = mapSchema(newSchema.items);
+                }
+                // Remove unsupported OpenAI-specific fields if Gemini complains
+                delete newSchema.additionalProperties;
+                delete newSchema['$schema'];
+                return newSchema;
+            };
+            generationConfig.responseSchema = mapSchema(openAISchema);
+        }
        // Gemini format: system instruction is separate, history is separate from last message
        // generateContent accepts a string or parts.
        // We need to construct the prompt.
@@ -22,7 +65,8 @@ class GeminiProvider {
        // Let's try to prepend for compatibility if needed, but 'systemInstruction' param exists in getGenerativeModel config.
        const configuredModel = genAI.getGenerativeModel({
            model: modelName,
-            systemInstruction: systemInstruction ? systemInstruction.trim() : undefined
+            systemInstruction: systemInstruction ? systemInstruction.trim() : undefined,
+            generationConfig
        });
        // Build history/messages
        // Gemini `generateContent` takes the *last* user message.
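
Before/after for `mapSchema`, using an illustrative input of the kind `zod-to-json-schema` emits:

```typescript
const input = {
  $schema: "http://json-schema.org/draft-07/schema#",
  type: "object",
  additionalProperties: false,
  properties: { tags: { type: "array", items: { type: "string" } } }
};

// mapSchema(input) yields:
const output = {
  type: "OBJECT", // lowercase types uppercased recursively
  properties: { tags: { type: "ARRAY", items: { type: "STRING" } } }
  // $schema and additionalProperties are deleted
};
```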
package/dist/execution/gemini.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"gemini.js","sources":["../../src/execution/gemini.ts"],"sourcesContent":["import { GoogleGenerativeAI } from '@google/generative-ai';\nimport { Provider, ProviderResponse, ExecutionOptions } from './provider';\nimport { Request } from '../chat';\n\nexport class GeminiProvider implements Provider {\n async execute(request: Request, options: ExecutionOptions = {}): Promise<ProviderResponse> {\n const apiKey = options.apiKey || process.env.GEMINI_API_KEY; // or GOOGLE_API_KEY\n if (!apiKey) throw new Error('Gemini API key is required');\n\n const genAI = new GoogleGenerativeAI(apiKey);\n \n const modelName = options.model || request.model || 'gemini-1.5-pro';\n \n // Gemini format: system instruction is separate, history is separate from last message\n // generateContent accepts a string or parts.\n \n // We need to construct the prompt.\n // Simple approach: Concat system instructions + chat history\n \n let systemInstruction = '';\n \n // Extract system prompt\n for (const msg of request.messages) {\n if (msg.role === 'system' || msg.role === 'developer') {\n systemInstruction += (typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)) + '\\n\\n';\n }\n }\n\n // Configure model with system instruction if available (newer Gemini versions support this)\n // Or just prepend to first user message.\n // Let's try to prepend for compatibility if needed, but 'systemInstruction' param exists in getGenerativeModel config.\n \n const configuredModel = genAI.getGenerativeModel({ \n model: modelName,\n systemInstruction: systemInstruction ? systemInstruction.trim() : undefined\n });\n\n // Build history/messages\n // Gemini `generateContent` takes the *last* user message.\n // `startChat` takes history.\n \n const chatHistory = [];\n let lastUserMessage = '';\n\n for (const msg of request.messages) {\n if (msg.role === 'system' || msg.role === 'developer') continue;\n \n const content = typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content);\n \n if (msg.role === 'user') {\n lastUserMessage = content; // Assuming strictly alternating or we just want the prompt?\n // If there are multiple messages, we should build a chat.\n }\n \n chatHistory.push({\n role: msg.role === 'assistant' ? 'model' : 'user',\n parts: [{ text: content }]\n });\n }\n\n // If we are just running a prompt (single turn), we can use generateContent with the full text.\n // But let's support multi-turn by using startChat if history > 1.\n \n // If it's a typical \"Prompt\" execution, it's usually System + 1 User message.\n \n let result;\n \n if (chatHistory.length > 1) {\n // Remove last message from history to send it\n const lastMsg = chatHistory.pop();\n const chat = configuredModel.startChat({\n history: chatHistory\n });\n result = await chat.sendMessage(lastMsg?.parts[0].text || '');\n } else {\n // Just one message (or none?)\n result = await configuredModel.generateContent(lastUserMessage || ' ');\n }\n\n const response = await result.response;\n const text = response.text();\n\n return {\n content: text,\n model: modelName,\n // Gemini usage metadata usageMetadata\n usage: response.usageMetadata ? 
{\n inputTokens: response.usageMetadata.promptTokenCount,\n outputTokens: response.usageMetadata.candidatesTokenCount\n } : undefined\n };\n }\n}\n\n"],"names":["GeminiProvider","execute","request","options","apiKey","process","env","GEMINI_API_KEY","Error","genAI","GoogleGenerativeAI","modelName","model","systemInstruction","msg","messages","role","content","JSON","stringify","configuredModel","getGenerativeModel","trim","undefined","chatHistory","lastUserMessage","push","parts","text","result","length","lastMsg","pop","chat","startChat","history","sendMessage","generateContent","response","usage","usageMetadata","inputTokens","promptTokenCount","outputTokens","candidatesTokenCount"],"mappings":";;AAIO,MAAMA,cAAAA,CAAAA;AACT,IAAA,MAAMC,QAAQC,OAAgB,EAAEC,OAAAA,GAA4B,EAAE,EAA6B;QACvF,MAAMC,MAAAA,GAASD,QAAQC,MAAM,IAAIC,QAAQC,GAAG,CAACC,cAAc,CAAA;AAC3D,QAAA,IAAI,CAACH,MAAAA,EAAQ,MAAM,IAAII,KAAAA,CAAM,4BAAA,CAAA;QAE7B,MAAMC,KAAAA,GAAQ,IAAIC,kBAAAA,CAAmBN,MAAAA,CAAAA;AAErC,QAAA,MAAMO,YAAYR,OAAAA,CAAQS,KAAK,IAAIV,OAAAA,CAAQU,KAAK,IAAI,gBAAA;;;;;AAQpD,QAAA,IAAIC,iBAAAA,GAAoB,EAAA;;AAGxB,QAAA,KAAK,MAAMC,GAAAA,IAAOZ,OAAAA,CAAQa,QAAQ,CAAE;AAChC,YAAA,IAAID,IAAIE,IAAI,KAAK,YAAYF,GAAAA,CAAIE,IAAI,KAAK,WAAA,EAAa;AACnDH,gBAAAA,iBAAAA,IAAqB,CAAC,OAAOC,GAAAA,CAAIG,OAAO,KAAK,QAAA,GAAWH,GAAAA,CAAIG,OAAO,GAAGC,KAAKC,SAAS,CAACL,GAAAA,CAAIG,OAAO,CAAA,IAAK,MAAA;AACzG,YAAA;AACJ,QAAA;;;;QAMA,MAAMG,eAAAA,GAAkBX,KAAAA,CAAMY,kBAAkB,CAAC;YAC7CT,KAAAA,EAAOD,SAAAA;YACPE,iBAAAA,EAAmBA,iBAAAA,GAAoBA,iBAAAA,CAAkBS,IAAI,EAAA,GAAKC;AACtE,SAAA,CAAA;;;;AAMA,QAAA,MAAMC,cAAc,EAAE;AACtB,QAAA,IAAIC,eAAAA,GAAkB,EAAA;AAEtB,QAAA,KAAK,MAAMX,GAAAA,IAAOZ,OAAAA,CAAQa,QAAQ,CAAE;AAChC,YAAA,IAAID,IAAIE,IAAI,KAAK,YAAYF,GAAAA,CAAIE,IAAI,KAAK,WAAA,EAAa;AAEvD,YAAA,MAAMC,OAAAA,GAAU,OAAOH,GAAAA,CAAIG,OAAO,KAAK,QAAA,GAAWH,GAAAA,CAAIG,OAAO,GAAGC,IAAAA,CAAKC,SAAS,CAACL,IAAIG,OAAO,CAAA;YAE1F,IAAIH,GAAAA,CAAIE,IAAI,KAAK,MAAA,EAAQ;AACrBS,gBAAAA,eAAAA,GAAkBR;;AAEtB,YAAA;AAEAO,YAAAA,WAAAA,CAAYE,IAAI,CAAC;AACbV,gBAAAA,IAAAA,EAAMF,GAAAA,CAAIE,IAAI,KAAK,WAAA,GAAc,OAAA,GAAU,MAAA;gBAC3CW,KAAAA,EAAO;AAAC,oBAAA;wBAAEC,IAAAA,EAAMX;AAAQ;AAAE;AAC9B,aAAA,CAAA;AACJ,QAAA;;;;QAOA,IAAIY,MAAAA;QAEJ,IAAIL,WAAAA,CAAYM,MAAM,GAAG,CAAA,EAAG;;YAExB,MAAMC,OAAAA,GAAUP,YAAYQ,GAAG,EAAA;YAC/B,MAAMC,IAAAA,GAAOb,eAAAA,CAAgBc,SAAS,CAAC;gBACnCC,OAAAA,EAASX;AACb,aAAA,CAAA;AACAK,YAAAA,MAAAA,GAAS,MAAMI,IAAAA,CAAKG,WAAW,CAACL,CAAAA,OAAAA,KAAAA,IAAAA,IAAAA,OAAAA,KAAAA,MAAAA,GAAAA,MAAAA,GAAAA,OAAAA,CAASJ,KAAK,CAAC,CAAA,CAAE,CAACC,IAAI,KAAI,EAAA,CAAA;QAC9D,CAAA,MAAO;;AAEHC,YAAAA,MAAAA,GAAS,MAAMT,eAAAA,CAAgBiB,eAAe,CAACZ,eAAAA,IAAmB,GAAA,CAAA;AACtE,QAAA;QAEA,MAAMa,QAAAA,GAAW,MAAMT,MAAAA,CAAOS,QAAQ;QACtC,MAAMV,IAAAA,GAAOU,SAASV,IAAI,EAAA;QAE1B,OAAO;YACHX,OAAAA,EAASW,IAAAA;YACThB,KAAAA,EAAOD,SAAAA;;YAEP4B,KAAAA,EAAOD,QAAAA,CAASE,aAAa,GAAG;gBAC5BC,WAAAA,EAAaH,QAAAA,CAASE,aAAa,CAACE,gBAAgB;gBACpDC,YAAAA,EAAcL,QAAAA,CAASE,aAAa,CAACI;aACzC,GAAIrB;AACR,SAAA;AACJ,IAAA;AACJ;;;;"}
+
{"version":3,"file":"gemini.js","sources":["../../src/execution/gemini.ts"],"sourcesContent":["import { GoogleGenerativeAI } from '@google/generative-ai';\nimport { Provider, ProviderResponse, ExecutionOptions } from './provider';\nimport { Request } from '../chat';\n\nexport class GeminiProvider implements Provider {\n async execute(request: Request, options: ExecutionOptions = {}): Promise<ProviderResponse> {\n const apiKey = options.apiKey || process.env.GEMINI_API_KEY; // or GOOGLE_API_KEY\n if (!apiKey) throw new Error('Gemini API key is required');\n\n const genAI = new GoogleGenerativeAI(apiKey);\n \n const modelName = options.model || request.model || 'gemini-1.5-pro';\n \n // Handle generation config for structured output\n const generationConfig: any = {};\n \n if (request.responseFormat?.type === 'json_schema') {\n generationConfig.responseMimeType = \"application/json\";\n \n // Map OpenAI JSON schema to Gemini Schema\n // OpenAI: { name: \"...\", schema: { type: \"object\", properties: ... } }\n // Gemini expects the schema object directly\n \n const openAISchema = request.responseFormat.json_schema.schema;\n \n // We need to recursively map the types because Gemini uses uppercase enums\n // SchemaType.OBJECT, SchemaType.STRING, etc.\n // But the SDK also accepts string types \"OBJECT\", \"STRING\" etc.\n // Let's implement a simple converter or pass it if compatible.\n // Zod-to-json-schema produces lowercase types (\"object\", \"string\").\n // Google's SDK might need them to be uppercase or mapped.\n \n // Helper to clean up schema for Gemini\n // Removes $schema, strict, and additionalProperties if not supported or formatted differently\n // And maps 'type' to uppercase.\n const mapSchema = (s: any): any => {\n if (!s) return undefined;\n \n const newSchema: any = { ...s };\n \n if (newSchema.type) {\n newSchema.type = (typeof newSchema.type === 'string') \n ? (newSchema.type as string).toUpperCase() \n : newSchema.type;\n }\n \n if (newSchema.properties) {\n const newProps: any = {};\n for (const [key, val] of Object.entries(newSchema.properties)) {\n newProps[key] = mapSchema(val);\n }\n newSchema.properties = newProps;\n }\n \n if (newSchema.items) {\n newSchema.items = mapSchema(newSchema.items);\n }\n \n // Remove unsupported OpenAI-specific fields if Gemini complains\n delete newSchema.additionalProperties;\n delete newSchema['$schema'];\n \n return newSchema;\n };\n \n generationConfig.responseSchema = mapSchema(openAISchema);\n }\n\n // Gemini format: system instruction is separate, history is separate from last message\n // generateContent accepts a string or parts.\n \n // We need to construct the prompt.\n // Simple approach: Concat system instructions + chat history\n \n let systemInstruction = '';\n \n // Extract system prompt\n for (const msg of request.messages) {\n if (msg.role === 'system' || msg.role === 'developer') {\n systemInstruction += (typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)) + '\\n\\n';\n }\n }\n\n // Configure model with system instruction if available (newer Gemini versions support this)\n // Or just prepend to first user message.\n // Let's try to prepend for compatibility if needed, but 'systemInstruction' param exists in getGenerativeModel config.\n \n const configuredModel = genAI.getGenerativeModel({ \n model: modelName,\n systemInstruction: systemInstruction ? 
systemInstruction.trim() : undefined,\n generationConfig\n });\n\n // Build history/messages\n // Gemini `generateContent` takes the *last* user message.\n // `startChat` takes history.\n \n const chatHistory = [];\n let lastUserMessage = '';\n\n for (const msg of request.messages) {\n if (msg.role === 'system' || msg.role === 'developer') continue;\n \n const content = typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content);\n \n if (msg.role === 'user') {\n lastUserMessage = content; // Assuming strictly alternating or we just want the prompt?\n // If there are multiple messages, we should build a chat.\n }\n \n chatHistory.push({\n role: msg.role === 'assistant' ? 'model' : 'user',\n parts: [{ text: content }]\n });\n }\n\n // If we are just running a prompt (single turn), we can use generateContent with the full text.\n // But let's support multi-turn by using startChat if history > 1.\n \n // If it's a typical \"Prompt\" execution, it's usually System + 1 User message.\n \n let result;\n \n if (chatHistory.length > 1) {\n // Remove last message from history to send it\n const lastMsg = chatHistory.pop();\n const chat = configuredModel.startChat({\n history: chatHistory\n });\n result = await chat.sendMessage(lastMsg?.parts[0].text || '');\n } else {\n // Just one message (or none?)\n result = await configuredModel.generateContent(lastUserMessage || ' ');\n }\n\n const response = await result.response;\n const text = response.text();\n\n return {\n content: text,\n model: modelName,\n // Gemini usage metadata usageMetadata\n usage: response.usageMetadata ? {\n inputTokens: response.usageMetadata.promptTokenCount,\n outputTokens: response.usageMetadata.candidatesTokenCount\n } : undefined\n };\n }\n}\n\n"],"names":["GeminiProvider","execute","request","options","apiKey","process","env","GEMINI_API_KEY","Error","genAI","GoogleGenerativeAI","modelName","model","generationConfig","responseFormat","type","responseMimeType","openAISchema","json_schema","schema","mapSchema","s","undefined","newSchema","toUpperCase","properties","newProps","key","val","Object","entries","items","additionalProperties","responseSchema","systemInstruction","msg","messages","role","content","JSON","stringify","configuredModel","getGenerativeModel","trim","chatHistory","lastUserMessage","push","parts","text","result","length","lastMsg","pop","chat","startChat","history","sendMessage","generateContent","response","usage","usageMetadata","inputTokens","promptTokenCount","outputTokens","candidatesTokenCount"],"mappings":";;AAIO,MAAMA,cAAAA,CAAAA;AACT,IAAA,MAAMC,QAAQC,OAAgB,EAAEC,OAAAA,GAA4B,EAAE,EAA6B;AAWnFD,QAAAA,IAAAA,uBAAAA;QAVJ,MAAME,MAAAA,GAASD,QAAQC,MAAM,IAAIC,QAAQC,GAAG,CAACC,cAAc,CAAA;AAC3D,QAAA,IAAI,CAACH,MAAAA,EAAQ,MAAM,IAAII,KAAAA,CAAM,4BAAA,CAAA;QAE7B,MAAMC,KAAAA,GAAQ,IAAIC,kBAAAA,CAAmBN,MAAAA,CAAAA;AAErC,QAAA,MAAMO,YAAYR,OAAAA,CAAQS,KAAK,IAAIV,OAAAA,CAAQU,KAAK,IAAI,gBAAA;;AAGpD,QAAA,MAAMC,mBAAwB,EAAC;QAE/B,IAAIX,CAAAA,CAAAA,0BAAAA,OAAAA,CAAQY,cAAc,cAAtBZ,uBAAAA,KAAAA,MAAAA,GAAAA,MAAAA,GAAAA,uBAAAA,CAAwBa,IAAI,MAAK,aAAA,EAAe;AAChDF,YAAAA,gBAAAA,CAAiBG,gBAAgB,GAAG,kBAAA;;;;AAMpC,YAAA,MAAMC,eAAef,OAAAA,CAAQY,cAAc,CAACI,WAAW,CAACC,MAAM;;;;;;;;;;AAY9D,YAAA,MAAMC,YAAY,CAACC,CAAAA,GAAAA;gBACf,IAAI,CAACA,GAAG,OAAOC,SAAAA;AAEf,gBAAA,MAAMC,SAAAA,GAAiB;AAAE,oBAAA,GAAGF;AAAE,iBAAA;gBAE9B,IAAIE,SAAAA,CAAUR,IAAI,EAAE;AAChBQ,oBAAAA,SAAAA,CAAUR,IAAI,GAAG,OAAQQ,UAAUR,IAAI,KAAK,QAAA,GACrCQ,UAAUR,IAAI,CAAYS,WAAW,EAAA,GACtCD,UAAUR,IAAI;AACxB,gBAAA;gBAEA,IAAIQ,SAAAA,CAAUE,UAAU,EAAE;AACtB,oBAAA,MAAMC,WAAgB,E
AAC;oBACvB,KAAK,MAAM,CAACC,GAAAA,EAAKC,GAAAA,CAAI,IAAIC,OAAOC,OAAO,CAACP,SAAAA,CAAUE,UAAU,CAAA,CAAG;wBAC3DC,QAAQ,CAACC,GAAAA,CAAI,GAAGP,SAAAA,CAAUQ,GAAAA,CAAAA;AAC9B,oBAAA;AACAL,oBAAAA,SAAAA,CAAUE,UAAU,GAAGC,QAAAA;AAC3B,gBAAA;gBAEA,IAAIH,SAAAA,CAAUQ,KAAK,EAAE;AACjBR,oBAAAA,SAAAA,CAAUQ,KAAK,GAAGX,SAAAA,CAAUG,SAAAA,CAAUQ,KAAK,CAAA;AAC/C,gBAAA;;AAGA,gBAAA,OAAOR,UAAUS,oBAAoB;gBACrC,OAAOT,SAAS,CAAC,SAAA,CAAU;gBAE3B,OAAOA,SAAAA;AACX,YAAA,CAAA;YAEAV,gBAAAA,CAAiBoB,cAAc,GAAGb,SAAAA,CAAUH,YAAAA,CAAAA;AAChD,QAAA;;;;;AAQA,QAAA,IAAIiB,iBAAAA,GAAoB,EAAA;;AAGxB,QAAA,KAAK,MAAMC,GAAAA,IAAOjC,OAAAA,CAAQkC,QAAQ,CAAE;AAChC,YAAA,IAAID,IAAIE,IAAI,KAAK,YAAYF,GAAAA,CAAIE,IAAI,KAAK,WAAA,EAAa;AACnDH,gBAAAA,iBAAAA,IAAqB,CAAC,OAAOC,GAAAA,CAAIG,OAAO,KAAK,QAAA,GAAWH,GAAAA,CAAIG,OAAO,GAAGC,KAAKC,SAAS,CAACL,GAAAA,CAAIG,OAAO,CAAA,IAAK,MAAA;AACzG,YAAA;AACJ,QAAA;;;;QAMA,MAAMG,eAAAA,GAAkBhC,KAAAA,CAAMiC,kBAAkB,CAAC;YAC7C9B,KAAAA,EAAOD,SAAAA;YACPuB,iBAAAA,EAAmBA,iBAAAA,GAAoBA,iBAAAA,CAAkBS,IAAI,EAAA,GAAKrB,SAAAA;AAClET,YAAAA;AACJ,SAAA,CAAA;;;;AAMA,QAAA,MAAM+B,cAAc,EAAE;AACtB,QAAA,IAAIC,eAAAA,GAAkB,EAAA;AAEtB,QAAA,KAAK,MAAMV,GAAAA,IAAOjC,OAAAA,CAAQkC,QAAQ,CAAE;AAChC,YAAA,IAAID,IAAIE,IAAI,KAAK,YAAYF,GAAAA,CAAIE,IAAI,KAAK,WAAA,EAAa;AAEvD,YAAA,MAAMC,OAAAA,GAAU,OAAOH,GAAAA,CAAIG,OAAO,KAAK,QAAA,GAAWH,GAAAA,CAAIG,OAAO,GAAGC,IAAAA,CAAKC,SAAS,CAACL,IAAIG,OAAO,CAAA;YAE1F,IAAIH,GAAAA,CAAIE,IAAI,KAAK,MAAA,EAAQ;AACrBQ,gBAAAA,eAAAA,GAAkBP;;AAEtB,YAAA;AAEAM,YAAAA,WAAAA,CAAYE,IAAI,CAAC;AACbT,gBAAAA,IAAAA,EAAMF,GAAAA,CAAIE,IAAI,KAAK,WAAA,GAAc,OAAA,GAAU,MAAA;gBAC3CU,KAAAA,EAAO;AAAC,oBAAA;wBAAEC,IAAAA,EAAMV;AAAQ;AAAE;AAC9B,aAAA,CAAA;AACJ,QAAA;;;;QAOA,IAAIW,MAAAA;QAEJ,IAAIL,WAAAA,CAAYM,MAAM,GAAG,CAAA,EAAG;;YAExB,MAAMC,OAAAA,GAAUP,YAAYQ,GAAG,EAAA;YAC/B,MAAMC,IAAAA,GAAOZ,eAAAA,CAAgBa,SAAS,CAAC;gBACnCC,OAAAA,EAASX;AACb,aAAA,CAAA;AACAK,YAAAA,MAAAA,GAAS,MAAMI,IAAAA,CAAKG,WAAW,CAACL,CAAAA,OAAAA,KAAAA,IAAAA,IAAAA,OAAAA,KAAAA,MAAAA,GAAAA,MAAAA,GAAAA,OAAAA,CAASJ,KAAK,CAAC,CAAA,CAAE,CAACC,IAAI,KAAI,EAAA,CAAA;QAC9D,CAAA,MAAO;;AAEHC,YAAAA,MAAAA,GAAS,MAAMR,eAAAA,CAAgBgB,eAAe,CAACZ,eAAAA,IAAmB,GAAA,CAAA;AACtE,QAAA;QAEA,MAAMa,QAAAA,GAAW,MAAMT,MAAAA,CAAOS,QAAQ;QACtC,MAAMV,IAAAA,GAAOU,SAASV,IAAI,EAAA;QAE1B,OAAO;YACHV,OAAAA,EAASU,IAAAA;YACTpC,KAAAA,EAAOD,SAAAA;;YAEPgD,KAAAA,EAAOD,QAAAA,CAASE,aAAa,GAAG;gBAC5BC,WAAAA,EAAaH,QAAAA,CAASE,aAAa,CAACE,gBAAgB;gBACpDC,YAAAA,EAAcL,QAAAA,CAASE,aAAa,CAACI;aACzC,GAAI1C;AACR,SAAA;AACJ,IAAA;AACJ;;;;"}
package/dist/execution/openai.js
CHANGED
@@ -26,7 +26,8 @@ class OpenAIProvider {
             model: model,
             messages: messages,
             temperature: options.temperature,
-            max_tokens: options.maxTokens
+            max_tokens: options.maxTokens,
+            response_format: request.responseFormat
         });
         const choice = response.choices[0];
         return {
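
`request.responseFormat` is passed through unchanged here, so it must already be in the shape OpenAI's chat completions API expects for structured output. A hedged sketch (the `report` name and schema are placeholders; whether the library also sets `strict` is not shown in this diff):

```typescript
const responseFormat = {
  type: "json_schema",
  json_schema: {
    name: "report",
    schema: {
      type: "object",
      properties: { summary: { type: "string" } },
      required: ["summary"],
      additionalProperties: false
    }
  }
};
```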
package/dist/execution/openai.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"openai.js","sources":["../../src/execution/openai.ts"],"sourcesContent":["import OpenAI from 'openai';\nimport { Provider, ProviderResponse, ExecutionOptions } from './provider';\nimport { Request } from '../chat';\n\nexport class OpenAIProvider implements Provider {\n async execute(request: Request, options: ExecutionOptions = {}): Promise<ProviderResponse> {\n const apiKey = options.apiKey || process.env.OPENAI_API_KEY;\n if (!apiKey) throw new Error('OpenAI API key is required');\n\n const client = new OpenAI({ apiKey });\n \n const model = options.model || request.model || 'gpt-4';\n\n // Convert RiotPrompt messages to OpenAI messages\n const messages = request.messages.map(msg => {\n const role = msg.role === 'developer' ? 'system' : msg.role; // OpenAI uses system, not developer usually (except o1)\n // But wait, o1 uses developer. Let's respect what formatter gave us if valid.\n // OpenAI Node SDK types expect specific roles.\n // RiotPrompt roles: \"user\" | \"assistant\" | \"system\" | \"developer\"\n // OpenAI roles: \"system\" | \"user\" | \"assistant\" | \"tool\" | \"function\" | \"developer\" (recent versions)\n \n // We'll cast to any to avoid strict type issues with older/newer SDK versions mismatch\n return {\n role: role,\n content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content),\n name: msg.name\n } as any;\n });\n\n const response = await client.chat.completions.create({\n model: model,\n messages: messages,\n temperature: options.temperature,\n max_tokens: options.maxTokens,\n });\n\n const choice = response.choices[0];\n \n return {\n content: choice.message.content || '',\n model: response.model,\n usage: response.usage ? {\n inputTokens: response.usage.prompt_tokens,\n outputTokens: response.usage.completion_tokens\n } : undefined\n };\n }\n}\n\n"],"names":["OpenAIProvider","execute","request","options","apiKey","process","env","OPENAI_API_KEY","Error","client","OpenAI","model","messages","map","msg","role","content","JSON","stringify","name","response","chat","completions","create","temperature","max_tokens","maxTokens","choice","choices","message","usage","inputTokens","prompt_tokens","outputTokens","completion_tokens","undefined"],"mappings":";;AAIO,MAAMA,cAAAA,CAAAA;AACT,IAAA,MAAMC,QAAQC,OAAgB,EAAEC,OAAAA,GAA4B,EAAE,EAA6B;AACvF,QAAA,MAAMC,SAASD,OAAAA,CAAQC,MAAM,IAAIC,OAAAA,CAAQC,GAAG,CAACC,cAAc;AAC3D,QAAA,IAAI,CAACH,MAAAA,EAAQ,MAAM,IAAII,KAAAA,CAAM,4BAAA,CAAA;QAE7B,MAAMC,MAAAA,GAAS,IAAIC,MAAAA,CAAO;AAAEN,YAAAA;AAAO,SAAA,CAAA;AAEnC,QAAA,MAAMO,QAAQR,OAAAA,CAAQQ,KAAK,IAAIT,OAAAA,CAAQS,KAAK,IAAI,OAAA;;AAGhD,QAAA,MAAMC,WAAWV,OAAAA,CAAQU,QAAQ,CAACC,GAAG,CAACC,CAAAA,GAAAA,GAAAA;YAClC,MAAMC,IAAAA,GAAOD,IAAIC,IAAI,KAAK,cAAc,QAAA,GAAWD,GAAAA,CAAIC,IAAI,CAAA;;;;;;YAO3D,OAAO;gBACHA,IAAAA,EAAMA,IAAAA;AACNC,gBAAAA,OAAAA,EAAS,OAAOF,GAAAA,CAAIE,OAAO,KAAK,QAAA,GAAWF,GAAAA,CAAIE,OAAO,GAAGC,IAAAA,CAAKC,SAAS,CAACJ,GAAAA,CAAIE,OAAO,CAAA;AACnFG,gBAAAA,IAAAA,EAAML,IAAIK;AACd,aAAA;AACJ,QAAA,CAAA,CAAA;QAEA,MAAMC,QAAAA,GAAW,MAAMX,MAAAA,CAAOY,IAAI,CAACC,WAAW,CAACC,MAAM,CAAC;YAClDZ,KAAAA,EAAOA,KAAAA;YACPC,QAAAA,EAAUA,QAAAA;AACVY,YAAAA,WAAAA,EAAarB,QAAQqB,WAAW;AAChCC,YAAAA,UAAAA,EAAYtB,QAAQuB;
+
{"version":3,"file":"openai.js","sources":["../../src/execution/openai.ts"],"sourcesContent":["import OpenAI from 'openai';\nimport { Provider, ProviderResponse, ExecutionOptions } from './provider';\nimport { Request } from '../chat';\n\nexport class OpenAIProvider implements Provider {\n async execute(request: Request, options: ExecutionOptions = {}): Promise<ProviderResponse> {\n const apiKey = options.apiKey || process.env.OPENAI_API_KEY;\n if (!apiKey) throw new Error('OpenAI API key is required');\n\n const client = new OpenAI({ apiKey });\n \n const model = options.model || request.model || 'gpt-4';\n\n // Convert RiotPrompt messages to OpenAI messages\n const messages = request.messages.map(msg => {\n const role = msg.role === 'developer' ? 'system' : msg.role; // OpenAI uses system, not developer usually (except o1)\n // But wait, o1 uses developer. Let's respect what formatter gave us if valid.\n // OpenAI Node SDK types expect specific roles.\n // RiotPrompt roles: \"user\" | \"assistant\" | \"system\" | \"developer\"\n // OpenAI roles: \"system\" | \"user\" | \"assistant\" | \"tool\" | \"function\" | \"developer\" (recent versions)\n \n // We'll cast to any to avoid strict type issues with older/newer SDK versions mismatch\n return {\n role: role,\n content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content),\n name: msg.name\n } as any;\n });\n\n const response = await client.chat.completions.create({\n model: model,\n messages: messages,\n temperature: options.temperature,\n max_tokens: options.maxTokens,\n response_format: request.responseFormat,\n });\n\n const choice = response.choices[0];\n \n return {\n content: choice.message.content || '',\n model: response.model,\n usage: response.usage ? {\n inputTokens: response.usage.prompt_tokens,\n outputTokens: response.usage.completion_tokens\n } : undefined\n };\n 
}\n}\n\n"],"names":["OpenAIProvider","execute","request","options","apiKey","process","env","OPENAI_API_KEY","Error","client","OpenAI","model","messages","map","msg","role","content","JSON","stringify","name","response","chat","completions","create","temperature","max_tokens","maxTokens","response_format","responseFormat","choice","choices","message","usage","inputTokens","prompt_tokens","outputTokens","completion_tokens","undefined"],"mappings":";;AAIO,MAAMA,cAAAA,CAAAA;AACT,IAAA,MAAMC,QAAQC,OAAgB,EAAEC,OAAAA,GAA4B,EAAE,EAA6B;AACvF,QAAA,MAAMC,SAASD,OAAAA,CAAQC,MAAM,IAAIC,OAAAA,CAAQC,GAAG,CAACC,cAAc;AAC3D,QAAA,IAAI,CAACH,MAAAA,EAAQ,MAAM,IAAII,KAAAA,CAAM,4BAAA,CAAA;QAE7B,MAAMC,MAAAA,GAAS,IAAIC,MAAAA,CAAO;AAAEN,YAAAA;AAAO,SAAA,CAAA;AAEnC,QAAA,MAAMO,QAAQR,OAAAA,CAAQQ,KAAK,IAAIT,OAAAA,CAAQS,KAAK,IAAI,OAAA;;AAGhD,QAAA,MAAMC,WAAWV,OAAAA,CAAQU,QAAQ,CAACC,GAAG,CAACC,CAAAA,GAAAA,GAAAA;YAClC,MAAMC,IAAAA,GAAOD,IAAIC,IAAI,KAAK,cAAc,QAAA,GAAWD,GAAAA,CAAIC,IAAI,CAAA;;;;;;YAO3D,OAAO;gBACHA,IAAAA,EAAMA,IAAAA;AACNC,gBAAAA,OAAAA,EAAS,OAAOF,GAAAA,CAAIE,OAAO,KAAK,QAAA,GAAWF,GAAAA,CAAIE,OAAO,GAAGC,IAAAA,CAAKC,SAAS,CAACJ,GAAAA,CAAIE,OAAO,CAAA;AACnFG,gBAAAA,IAAAA,EAAML,IAAIK;AACd,aAAA;AACJ,QAAA,CAAA,CAAA;QAEA,MAAMC,QAAAA,GAAW,MAAMX,MAAAA,CAAOY,IAAI,CAACC,WAAW,CAACC,MAAM,CAAC;YAClDZ,KAAAA,EAAOA,KAAAA;YACPC,QAAAA,EAAUA,QAAAA;AACVY,YAAAA,WAAAA,EAAarB,QAAQqB,WAAW;AAChCC,YAAAA,UAAAA,EAAYtB,QAAQuB,SAAS;AAC7BC,YAAAA,eAAAA,EAAiBzB,QAAQ0B;AAC7B,SAAA,CAAA;AAEA,QAAA,MAAMC,MAAAA,GAAST,QAAAA,CAASU,OAAO,CAAC,CAAA,CAAE;QAElC,OAAO;AACHd,YAAAA,OAAAA,EAASa,MAAAA,CAAOE,OAAO,CAACf,OAAO,IAAI,EAAA;AACnCL,YAAAA,KAAAA,EAAOS,SAAST,KAAK;YACrBqB,KAAAA,EAAOZ,QAAAA,CAASY,KAAK,GAAG;gBACpBC,WAAAA,EAAab,QAAAA,CAASY,KAAK,CAACE,aAAa;gBACzCC,YAAAA,EAAcf,QAAAA,CAASY,KAAK,CAACI;aACjC,GAAIC;AACR,SAAA;AACJ,IAAA;AACJ;;;;"}
package/dist/formatter.js
CHANGED
@@ -99,24 +99,52 @@ const create = (formatterOptions)=>{
 const formatPrompt = (model, prompt)=>{
     logger.silly('Formatting prompt');
     const chatRequest = createRequest(model);
-
-
-
-
-
+    // --- System/Role Message Construction ---
+    // Collect sections that belong in the system/developer prompt (Persona, Tone, Constraints, etc.)
+    const systemSections = [];
+    if (prompt.persona) systemSections.push(prompt.persona);
+    if (prompt.tone) systemSections.push(prompt.tone);
+    if (prompt.constraints) systemSections.push(prompt.constraints);
+    if (prompt.safeguards) systemSections.push(prompt.safeguards);
+    if (prompt.responseFormat) systemSections.push(prompt.responseFormat);
+    if (systemSections.length > 0) {
+        // Combine all system sections into one system message content
+        const systemContent = systemSections.map((section)=>formatSection(section)).join('\n\n');
+        chatRequest.addMessage({
+            role: getPersonaRole(model),
+            content: systemContent
+        });
+    }
+    // --- User/Task Message Construction ---
+    // Logical flow: Context -> Examples -> Instructions -> Content -> Reasoning -> Recap
+    // This structure guides the model through the context and examples before presenting the core task
+    const userSections = [
+        prompt.contexts,
+        prompt.examples,
+        prompt.instructions,
+        prompt.contents,
+        prompt.reasoning,
+        prompt.recap
+    ];
+    let formattedUserContent = "";
+    for (const section of userSections){
+        if (section) {
+            formattedUserContent += formatSection(section) + '\n\n';
+        }
+    }
+    // Ensure we always have a user message, or if we have content to send
+    if (formattedUserContent.trim().length > 0 || systemSections.length === 0) {
+        chatRequest.addMessage({
+            role: "user",
+            content: formattedUserContent.trim() || " "
         });
     }
-
-
-    formattedAreas += formatSection(prompt.contents) + '\n\n';
+    if (prompt.schema) {
+        chatRequest.responseFormat = prompt.schema;
     }
-    if (prompt.
-
+    if (prompt.validator) {
+        chatRequest.validator = prompt.validator;
     }
-    chatRequest.addMessage({
-        role: "user",
-        content: formattedAreas
-    });
     return chatRequest;
 };
 return {
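
Note that the formatter only stores `prompt.validator` on the request; nothing in this diff applies it. A minimal sketch of how a caller might use it, assuming the formatter instance created by `create` above and a provider from `dist/execution` (the usage is an assumption, not shown here):

```typescript
const request = formatter.formatPrompt(model, prompt); // hypothetical accessor on the created formatter
const result = await provider.execute(request, options);
const data = request.validator
  ? request.validator.parse(JSON.parse(result.content)) // Zod validation of the structured reply
  : result.content;
```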