make-mp-data 2.1.6 → 3.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +31 -0
- package/dungeons/adspend.js +2 -2
- package/dungeons/ai-chat-analytics-ed.js +3 -2
- package/dungeons/anon.js +2 -2
- package/dungeons/array-of-object-loopup.js +181 -0
- package/dungeons/benchmark-heavy.js +241 -0
- package/dungeons/benchmark-light.js +141 -0
- package/dungeons/big.js +9 -8
- package/dungeons/business.js +2 -1
- package/dungeons/clinch-agi.js +632 -0
- package/dungeons/complex.js +3 -2
- package/dungeons/copilot.js +383 -0
- package/dungeons/ecommerce-store.js +0 -0
- package/dungeons/experiments.js +5 -4
- package/dungeons/foobar.js +101 -101
- package/dungeons/funnels.js +2 -2
- package/dungeons/gaming.js +3 -2
- package/dungeons/harness/harness-education.js +988 -0
- package/dungeons/harness/harness-fintech.js +976 -0
- package/dungeons/harness/harness-food.js +985 -0
- package/dungeons/harness/harness-gaming.js +1178 -0
- package/dungeons/harness/harness-media.js +961 -0
- package/dungeons/harness/harness-sass.js +923 -0
- package/dungeons/harness/harness-social.js +928 -0
- package/dungeons/kurby.js +211 -0
- package/dungeons/media.js +5 -4
- package/dungeons/mil.js +4 -3
- package/dungeons/mirror.js +2 -2
- package/dungeons/money2020-ed.js +8 -7
- package/dungeons/sanity.js +3 -2
- package/dungeons/scd.js +3 -2
- package/dungeons/simple.js +29 -14
- package/dungeons/strict-event-test.js +30 -0
- package/dungeons/student-teacher.js +3 -2
- package/dungeons/text-generation.js +84 -85
- package/dungeons/too-big-events.js +166 -0
- package/dungeons/uday-schema.json +220 -0
- package/dungeons/userAgent.js +4 -3
- package/index.js +41 -54
- package/lib/core/config-validator.js +122 -7
- package/lib/core/context.js +7 -14
- package/lib/core/storage.js +60 -30
- package/lib/generators/adspend.js +12 -27
- package/lib/generators/events.js +6 -7
- package/lib/generators/funnels.js +16 -5
- package/lib/generators/product-lookup.js +262 -0
- package/lib/generators/product-names.js +195 -0
- package/lib/generators/profiles.js +3 -3
- package/lib/generators/scd.js +13 -3
- package/lib/generators/text.js +17 -4
- package/lib/orchestrators/mixpanel-sender.js +251 -208
- package/lib/orchestrators/user-loop.js +57 -19
- package/lib/templates/funnels-instructions.txt +272 -0
- package/lib/templates/hook-examples.json +187 -0
- package/lib/templates/hooks-instructions.txt +295 -8
- package/lib/templates/phrases.js +473 -16
- package/lib/templates/refine-instructions.txt +485 -0
- package/lib/templates/schema-instructions.txt +239 -109
- package/lib/templates/schema.d.ts +173 -0
- package/lib/templates/verbose-schema.js +140 -206
- package/lib/utils/ai.js +853 -77
- package/lib/utils/chart.js +210 -0
- package/lib/utils/function-registry.js +285 -0
- package/lib/utils/json-evaluator.js +172 -0
- package/lib/utils/logger.js +38 -0
- package/lib/utils/mixpanel.js +101 -0
- package/lib/utils/project.js +3 -2
- package/lib/utils/utils.js +41 -4
- package/package.json +13 -19
- package/types.d.ts +15 -5
- package/lib/generators/text-bak-old.js +0 -1121
- package/lib/orchestrators/worker-manager.js +0 -203
- package/lib/templates/phrases-bak.js +0 -925
- package/lib/templates/prompt (old).txt +0 -98
- package/lib/templates/scratch-dungeon-template.js +0 -116
- package/lib/templates/textQuickTest.js +0 -172
package/lib/utils/ai.js
CHANGED
|
@@ -1,120 +1,896 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* AI Cache Module - Pre-warmed AI transformer instances for Cloud Run
|
|
3
|
+
*
|
|
4
|
+
* This module follows Google Cloud Run best practices for caching expensive objects.
|
|
5
|
+
* AI transformers are initialized at module load time (container cold-start) and
|
|
6
|
+
* reused across all requests to minimize latency.
|
|
7
|
+
*/
|
|
1
8
|
|
|
2
|
-
|
|
3
|
-
/** @typedef {import('../../types.js').EventConfig} EventConfig */
|
|
4
|
-
|
|
9
|
+
import AITransformer from 'ak-gemini';
|
|
5
10
|
import * as u from "ak-tools";
|
|
6
11
|
import 'dotenv/config';
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
import AITransformer from 'ak-gemini';
|
|
12
|
+
import { aiLogger as logger } from './logger.js';
|
|
13
|
+
import { trackAIJob } from './mixpanel.js';
|
|
10
14
|
|
|
11
|
-
|
|
12
|
-
|
|
15
|
+
const { NODE_ENV = "unknown", GOOGLE_CLOUD_PROJECT = "mixpanel-gtm-training" } = process.env;
|
|
16
|
+
// Uses Vertex AI with Application Default Credentials by default
|
|
17
|
+
// GEMINI_API_KEY is no longer required
|
|
13
18
|
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
{ "event": "add to basket", "product_added": "big mac" }
|
|
17
|
-
{ "event": "customized", "product_customized": "big mac" }
|
|
18
|
-
{ "event": "checked out", "cart": [{"item": "big mac"}] }
|
|
19
|
+
const MAX_OUTPUT_TOKENS = 50_000;
|
|
20
|
+
const DEFAULT_MODEL = 'gemini-2.5-flash';
|
|
19
21
|
|
|
22
|
+
// Supported models for validation
|
|
23
|
+
const SUPPORTED_MODELS = [
|
|
24
|
+
'gemini-2.5-flash',
|
|
25
|
+
'gemini-2.5-pro',
|
|
26
|
+
'gemini-3-flash-preview',
|
|
27
|
+
'gemini-3-pro-preview'
|
|
28
|
+
];
|
|
20
29
|
|
|
21
|
-
|
|
22
|
-
|
|
30
|
+
/**
|
|
31
|
+
* Format a number with commas for readability (e.g., 31395 -> "31,395")
|
|
32
|
+
* @param {number} num - Number to format
|
|
33
|
+
* @returns {string} Formatted number string
|
|
34
|
+
*/
|
|
35
|
+
function formatNumber(num) {
|
|
36
|
+
return num.toLocaleString('en-US');
|
|
37
|
+
}
|
|
23
38
|
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
39
|
+
/**
|
|
40
|
+
* Estimate token count from text using Google's rough approximation
|
|
41
|
+
* (1 token ~= 4 characters, 100 tokens ~= 60-80 English words)
|
|
42
|
+
* @param {string} text - Text to estimate tokens for
|
|
43
|
+
* @returns {number} Estimated token count
|
|
44
|
+
*/
|
|
45
|
+
function estimateTokens(text) {
|
|
46
|
+
if (!text) return 0;
|
|
47
|
+
return Math.ceil(text.length / 4);
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
/**
|
|
51
|
+
* Log structured AI call metrics
|
|
52
|
+
* In production, outputs JSON for Cloud Logging ingestion
|
|
53
|
+
* @param {object} params - Logging parameters
|
|
54
|
+
* @param {string} params.component - AI component name (schema, hooks, refine, funnels)
|
|
55
|
+
* @param {string} params.prompt - Original user prompt
|
|
56
|
+
* @param {any} params.response - AI response
|
|
57
|
+
* @param {number} params.duration_ms - Call duration in milliseconds
|
|
58
|
+
* @param {boolean} params.success - Whether the call succeeded
|
|
59
|
+
* @param {string} [params.error] - Error message if failed
|
|
60
|
+
* @param {string} [params.model] - Model used
|
|
61
|
+
* @param {object} [params.usage] - Token usage from ai.getLastUsage()
|
|
62
|
+
*/
|
|
63
|
+
function logAICallStructured(params) {
|
|
64
|
+
const { component, prompt, response, duration_ms, success, error, model, usage } = params;
|
|
65
|
+
|
|
66
|
+
const structuredLog = {
|
|
67
|
+
message: "AI Response",
|
|
68
|
+
component,
|
|
69
|
+
model: model || DEFAULT_MODEL,
|
|
70
|
+
prompt_preview: prompt?.substring(0, 200) || '',
|
|
71
|
+
prompt_length: prompt?.length || 0,
|
|
72
|
+
output_length: response ? JSON.stringify(response).length : 0,
|
|
73
|
+
duration_ms,
|
|
74
|
+
duration_human: `${(duration_ms / 1000).toFixed(2)} seconds`,
|
|
75
|
+
success,
|
|
76
|
+
error: error || null,
|
|
77
|
+
environment: NODE_ENV,
|
|
78
|
+
// Token usage from ak-gemini library
|
|
79
|
+
...(usage && {
|
|
80
|
+
promptTokens: usage.promptTokens,
|
|
81
|
+
responseTokens: usage.responseTokens,
|
|
82
|
+
totalTokens: usage.totalTokens,
|
|
83
|
+
attempts: usage.attempts,
|
|
84
|
+
modelVersion: usage.modelVersion,
|
|
85
|
+
requestedModel: usage.requestedModel
|
|
86
|
+
})
|
|
87
|
+
};
|
|
88
|
+
|
|
89
|
+
// In production, output structured JSON for Cloud Logging
|
|
90
|
+
if (NODE_ENV === 'production') {
|
|
91
|
+
console.log(JSON.stringify(structuredLog));
|
|
92
|
+
} else {
|
|
93
|
+
// In dev/test, use the logger with structured data
|
|
94
|
+
if (success) {
|
|
95
|
+
logger.debug(structuredLog, `AI ${component} call completed`);
|
|
96
|
+
} else {
|
|
97
|
+
logger.error(structuredLog, `AI ${component} call failed`);
|
|
98
|
+
}
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
return structuredLog;
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
// ========== VALIDATION FUNCTIONS FOR SELF-HEALING ==========
|
|
105
|
+
|
|
106
|
+
/**
|
|
107
|
+
* Validates AI-generated schema meets minimum requirements.
|
|
108
|
+
* Used with transformWithValidation for self-healing.
|
|
109
|
+
* @param {object} payload - AI response payload
|
|
110
|
+
* @returns {Promise<object>} Validated payload
|
|
111
|
+
* @throws {Error} If validation fails (triggers AI retry)
|
|
112
|
+
*/
|
|
113
|
+
async function validateSchemaOutput(payload) {
|
|
114
|
+
const errors = [];
|
|
115
|
+
|
|
116
|
+
if (!Array.isArray(payload.events) || payload.events.length < 8) {
|
|
117
|
+
errors.push(`events: expected array with at least 8 items, got ${payload.events?.length || 0}`);
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
if (!Array.isArray(payload.funnels) || payload.funnels.length < 3) {
|
|
121
|
+
errors.push(`funnels: expected array with at least 3 items, got ${payload.funnels?.length || 0}`);
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
if (!payload.superProps || typeof payload.superProps !== 'object' ||
|
|
125
|
+
Array.isArray(payload.superProps) || Object.keys(payload.superProps).length < 2) {
|
|
126
|
+
errors.push(`superProps: expected object with at least 2 keys, got ${Object.keys(payload.superProps || {}).length}`);
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
if (!payload.userProps || typeof payload.userProps !== 'object' ||
|
|
130
|
+
Array.isArray(payload.userProps) || Object.keys(payload.userProps).length < 6) {
|
|
131
|
+
errors.push(`userProps: expected object with at least 6 keys, got ${Object.keys(payload.userProps || {}).length}`);
|
|
132
|
+
}
|
|
133
|
+
|
|
134
|
+
if (Array.isArray(payload.events)) {
|
|
135
|
+
payload.events.forEach((event, i) => {
|
|
136
|
+
if (!event.event || typeof event.event !== 'string') {
|
|
137
|
+
errors.push(`events[${i}]: missing or invalid 'event' name`);
|
|
138
|
+
}
|
|
139
|
+
});
|
|
140
|
+
}
|
|
141
|
+
|
|
142
|
+
if (Array.isArray(payload.funnels)) {
|
|
143
|
+
payload.funnels.forEach((funnel, i) => {
|
|
144
|
+
if (!Array.isArray(funnel.sequence) || funnel.sequence.length < 2) {
|
|
145
|
+
errors.push(`funnels[${i}]: sequence must have at least 2 events`);
|
|
146
|
+
}
|
|
147
|
+
if (typeof funnel.conversionRate !== 'number' ||
|
|
148
|
+
funnel.conversionRate < 0 || funnel.conversionRate > 100) {
|
|
149
|
+
errors.push(`funnels[${i}]: conversionRate must be integer between 0 and 100`);
|
|
150
|
+
}
|
|
151
|
+
});
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
if (errors.length > 0) {
|
|
155
|
+
throw new Error(`Schema validation failed:\n${errors.join('\n')}\n\nPlease fix these issues and return the COMPLETE corrected schema (all fields: events, funnels, superProps, userProps, etc).`);
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
return payload;
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
/**
|
|
162
|
+
* Creates a funnels validator with schema context for semantic validation.
|
|
163
|
+
* @param {object} schema - The current schema (for event name validation)
|
|
164
|
+
* @returns {(payload: any) => Promise<any>} Validator function
|
|
165
|
+
*/
|
|
166
|
+
function createFunnelsValidator(schema) {
|
|
167
|
+
return async function validateFunnelsOutput(payload) {
|
|
168
|
+
const errors = [];
|
|
169
|
+
|
|
170
|
+
if (!Array.isArray(payload.funnels) || payload.funnels.length < 2) {
|
|
171
|
+
errors.push(`funnels: expected array with at least 2 items, got ${payload.funnels?.length || 0}`);
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
const validEventNames = new Set(
|
|
175
|
+
(schema?.events || []).map(e => e.event).filter(Boolean)
|
|
176
|
+
);
|
|
177
|
+
|
|
178
|
+
if (Array.isArray(payload.funnels)) {
|
|
179
|
+
payload.funnels.forEach((funnel, i) => {
|
|
180
|
+
if (!Array.isArray(funnel.sequence) || funnel.sequence.length < 2) {
|
|
181
|
+
errors.push(`funnels[${i}]: sequence must have at least 2 events`);
|
|
182
|
+
}
|
|
183
|
+
if (typeof funnel.conversionRate !== 'number' ||
|
|
184
|
+
funnel.conversionRate < 0 || funnel.conversionRate > 100) {
|
|
185
|
+
errors.push(`funnels[${i}]: conversionRate must be integer between 0 and 100`);
|
|
186
|
+
}
|
|
187
|
+
|
|
188
|
+
if (Array.isArray(funnel.sequence) && validEventNames.size > 0) {
|
|
189
|
+
funnel.sequence.forEach((eventName, j) => {
|
|
190
|
+
if (!validEventNames.has(eventName)) {
|
|
191
|
+
const available = [...validEventNames].slice(0, 5).join(', ');
|
|
192
|
+
errors.push(`funnels[${i}].sequence[${j}]: "${eventName}" is not a valid event. Available: ${available}...`);
|
|
193
|
+
}
|
|
194
|
+
});
|
|
195
|
+
}
|
|
196
|
+
});
|
|
197
|
+
}
|
|
198
|
+
|
|
199
|
+
if (errors.length > 0) {
|
|
200
|
+
throw new Error(`Funnels validation failed:\n${errors.join('\n')}\n\nPlease fix these issues and return the COMPLETE corrected funnels object with all funnels.`);
|
|
201
|
+
}
|
|
202
|
+
|
|
203
|
+
return payload;
|
|
204
|
+
};
|
|
205
|
+
}
|
|
206
|
+
|
|
207
|
+
/**
|
|
208
|
+
* Validates AI-generated hook code.
|
|
209
|
+
* @param {string} code - Hook function code
|
|
210
|
+
* @returns {Promise<string>} Validated code
|
|
211
|
+
* @throws {Error} If validation fails
|
|
212
|
+
*/
|
|
213
|
+
async function validateHookOutput(code) {
|
|
214
|
+
const errors = [];
|
|
215
|
+
|
|
216
|
+
if (typeof code !== 'string' || !code.trim()) {
|
|
217
|
+
throw new Error('Hook must be a non-empty string. Please return only the function code starting with: function(record, type, meta) { ... }');
|
|
218
|
+
}
|
|
219
|
+
|
|
220
|
+
const trimmed = code.trim();
|
|
221
|
+
|
|
222
|
+
if (!trimmed.startsWith('function(record, type, meta)') &&
|
|
223
|
+
!trimmed.startsWith('function (record, type, meta)')) {
|
|
224
|
+
errors.push('Hook must start with: function(record, type, meta)');
|
|
225
|
+
}
|
|
226
|
+
|
|
227
|
+
if (!trimmed.includes('return record')) {
|
|
228
|
+
errors.push('Hook must include "return record" statement');
|
|
229
|
+
}
|
|
230
|
+
|
|
231
|
+
try {
|
|
232
|
+
new Function('return ' + trimmed);
|
|
233
|
+
} catch (e) {
|
|
234
|
+
errors.push(`Invalid JavaScript syntax: ${e.message}`);
|
|
235
|
+
}
|
|
236
|
+
|
|
237
|
+
if (errors.length > 0) {
|
|
238
|
+
throw new Error(`Hook validation failed:\n${errors.join('\n')}\n\nPlease return the COMPLETE corrected hook function code only (no markdown, no explanations).`);
|
|
239
|
+
}
|
|
240
|
+
|
|
241
|
+
return code;
|
|
242
|
+
}
|
|
243
|
+
|
|
244
|
+
// Initialization state tracking
|
|
245
|
+
const initState = {
|
|
246
|
+
schema: { initialized: false, initializing: false, instance: null, error: null, instructions: null },
|
|
247
|
+
hooks: { initialized: false, initializing: false, instance: null, error: null, instructions: null },
|
|
248
|
+
refine: { initialized: false, initializing: false, instance: null, error: null, instructions: null },
|
|
249
|
+
funnels: { initialized: false, initializing: false, instance: null, error: null, instructions: null }
|
|
250
|
+
};
|
|
251
|
+
|
|
252
|
+
let cachedTemplates = null;
|
|
253
|
+
|
|
254
|
+
/**
|
|
255
|
+
* Load and cache instruction templates
|
|
256
|
+
* @returns {Promise<object>} Cached templates
|
|
257
|
+
*/
|
|
258
|
+
async function loadTemplates() {
|
|
259
|
+
if (cachedTemplates) return cachedTemplates;
|
|
260
|
+
|
|
261
|
+
const [INSTRUCTIONS, TYPES, HOOKS_INSTRUCTIONS, REFINE_INSTRUCTIONS, HOOK_EXAMPLES, FUNNELS_INSTRUCTIONS] = await Promise.all([
|
|
262
|
+
u.load('./lib/templates/schema-instructions.txt', false),
|
|
263
|
+
u.load('./lib/templates/schema.d.ts', false),
|
|
264
|
+
u.load('./lib/templates/hooks-instructions.txt', false),
|
|
265
|
+
u.load('./lib/templates/refine-instructions.txt', false),
|
|
266
|
+
u.load('./lib/templates/hook-examples.json', true),
|
|
267
|
+
u.load('./lib/templates/funnels-instructions.txt', false)
|
|
268
|
+
]);
|
|
269
|
+
|
|
270
|
+
const transformedExamples = HOOK_EXAMPLES.examples.map(ex => ({
|
|
271
|
+
PROMPT: { prompt: ex.prompt },
|
|
272
|
+
ANSWER: { hook: ex.response },
|
|
273
|
+
EXPLANATION: ex.useCase,
|
|
274
|
+
CONTEXT: {
|
|
275
|
+
hookTypes: ex.hookTypes,
|
|
276
|
+
patterns: ex.patterns,
|
|
277
|
+
complexity: ex.complexity
|
|
278
|
+
}
|
|
279
|
+
}));
|
|
280
|
+
|
|
281
|
+
cachedTemplates = {
|
|
282
|
+
schemaInstructions: INSTRUCTIONS.replace(/<TYPES>/g, TYPES),
|
|
283
|
+
hooksInstructions: HOOKS_INSTRUCTIONS,
|
|
284
|
+
refineInstructions: REFINE_INSTRUCTIONS.replace(/<TYPES>/g, TYPES),
|
|
285
|
+
funnelsInstructions: FUNNELS_INSTRUCTIONS,
|
|
286
|
+
types: TYPES,
|
|
287
|
+
hookExamples: transformedExamples
|
|
288
|
+
};
|
|
289
|
+
|
|
290
|
+
return cachedTemplates;
|
|
291
|
+
}
|
|
292
|
+
|
|
293
|
+
/**
|
|
294
|
+
* Initialize schema generation AI transformer
|
|
295
|
+
* @returns {Promise<AITransformer>} Initialized AI instance
|
|
296
|
+
*/
|
|
297
|
+
async function initSchemaAI() {
|
|
298
|
+
if (initState.schema.initialized) return initState.schema.instance;
|
|
299
|
+
if (initState.schema.initializing) {
|
|
300
|
+
while (initState.schema.initializing) {
|
|
301
|
+
await new Promise(resolve => setTimeout(resolve, 100));
|
|
302
|
+
}
|
|
303
|
+
if (initState.schema.error) throw initState.schema.error;
|
|
304
|
+
return initState.schema.instance;
|
|
305
|
+
}
|
|
306
|
+
|
|
307
|
+
initState.schema.initializing = true;
|
|
308
|
+
|
|
309
|
+
try {
|
|
310
|
+
const templates = await loadTemplates();
|
|
311
|
+
initState.schema.instructions = templates.schemaInstructions?.trim();
|
|
312
|
+
|
|
313
|
+
const instructionChars = initState.schema.instructions?.length || 0;
|
|
314
|
+
const instructionTokens = estimateTokens(initState.schema.instructions);
|
|
315
|
+
logger.debug({ component: 'schema', instructionChars, instructionTokens }, `Schema instructions: ${formatNumber(instructionChars)} chars (~${formatNumber(instructionTokens)} tokens)`);
|
|
316
|
+
|
|
317
|
+
const ai = new AITransformer({
|
|
318
|
+
vertexai: true,
|
|
319
|
+
maxOutputTokens: MAX_OUTPUT_TOKENS,
|
|
320
|
+
project: GOOGLE_CLOUD_PROJECT,
|
|
321
|
+
onlyJSON: true,
|
|
322
|
+
systemInstructions: null,
|
|
323
|
+
modelName: DEFAULT_MODEL,
|
|
324
|
+
logLevel: "none",
|
|
325
|
+
labels: {
|
|
326
|
+
"app": "dm4",
|
|
327
|
+
"component": "schema"
|
|
328
|
+
}
|
|
329
|
+
});
|
|
330
|
+
|
|
331
|
+
await ai.init();
|
|
332
|
+
|
|
333
|
+
initState.schema.instance = ai;
|
|
334
|
+
initState.schema.initialized = true;
|
|
335
|
+
|
|
336
|
+
// @ts-ignore
|
|
337
|
+
return ai;
|
|
338
|
+
} catch (error) {
|
|
339
|
+
initState.schema.error = error;
|
|
340
|
+
logger.error({ err: error }, 'Failed to initialize schema AI');
|
|
341
|
+
throw error;
|
|
342
|
+
} finally {
|
|
343
|
+
initState.schema.initializing = false;
|
|
344
|
+
}
|
|
40
345
|
}
|
|
41
346
|
|
|
42
|
-
|
|
43
|
-
|
|
347
|
+
/**
|
|
348
|
+
* Initialize hooks generation AI transformer
|
|
349
|
+
* @returns {Promise<AITransformer>} Initialized AI instance
|
|
350
|
+
*/
|
|
351
|
+
async function initHooksAI() {
|
|
352
|
+
if (initState.hooks.initialized) return initState.hooks.instance;
|
|
353
|
+
if (initState.hooks.initializing) {
|
|
354
|
+
while (initState.hooks.initializing) {
|
|
355
|
+
await new Promise(resolve => setTimeout(resolve, 100));
|
|
356
|
+
}
|
|
357
|
+
if (initState.hooks.error) throw initState.hooks.error;
|
|
358
|
+
return initState.hooks.instance;
|
|
359
|
+
}
|
|
360
|
+
|
|
361
|
+
initState.hooks.initializing = true;
|
|
362
|
+
|
|
363
|
+
try {
|
|
364
|
+
const templates = await loadTemplates();
|
|
365
|
+
initState.hooks.instructions = templates.hooksInstructions?.trim();
|
|
366
|
+
|
|
367
|
+
const instructionChars = initState.hooks.instructions?.length || 0;
|
|
368
|
+
const instructionTokens = estimateTokens(initState.hooks.instructions);
|
|
369
|
+
logger.debug({ component: 'hooks', instructionChars, instructionTokens }, `Hooks instructions: ${formatNumber(instructionChars)} chars (~${formatNumber(instructionTokens)} tokens)`);
|
|
370
|
+
|
|
371
|
+
const ai = new AITransformer({
|
|
372
|
+
vertexai: true,
|
|
373
|
+
project: GOOGLE_CLOUD_PROJECT,
|
|
374
|
+
maxOutputTokens: MAX_OUTPUT_TOKENS,
|
|
375
|
+
onlyJSON: false,
|
|
376
|
+
systemInstructions: null,
|
|
377
|
+
modelName: DEFAULT_MODEL,
|
|
378
|
+
logLevel: "none",
|
|
379
|
+
exampleData: templates.hookExamples,
|
|
380
|
+
promptKey: 'PROMPT',
|
|
381
|
+
answerKey: 'ANSWER',
|
|
382
|
+
contextKey: 'CONTEXT',
|
|
383
|
+
explanationKey: 'EXPLANATION',
|
|
384
|
+
labels: {
|
|
385
|
+
"app": "dm4",
|
|
386
|
+
"component": "hooks"
|
|
387
|
+
}
|
|
388
|
+
});
|
|
389
|
+
|
|
390
|
+
await ai.init();
|
|
391
|
+
|
|
392
|
+
initState.hooks.instance = ai;
|
|
393
|
+
initState.hooks.initialized = true;
|
|
394
|
+
|
|
395
|
+
// @ts-ignore
|
|
396
|
+
return ai;
|
|
397
|
+
} catch (error) {
|
|
398
|
+
initState.hooks.error = error;
|
|
399
|
+
logger.error({ err: error }, 'Failed to initialize hooks AI');
|
|
400
|
+
throw error;
|
|
401
|
+
} finally {
|
|
402
|
+
initState.hooks.initializing = false;
|
|
403
|
+
}
|
|
404
|
+
}
|
|
405
|
+
|
|
406
|
+
/**
|
|
407
|
+
* Initialize refine AI transformer
|
|
408
|
+
* @returns {Promise<AITransformer>} Initialized AI instance
|
|
409
|
+
*/
|
|
410
|
+
async function initRefineAI() {
|
|
411
|
+
if (initState.refine.initialized) return initState.refine.instance;
|
|
412
|
+
if (initState.refine.initializing) {
|
|
413
|
+
while (initState.refine.initializing) {
|
|
414
|
+
await new Promise(resolve => setTimeout(resolve, 100));
|
|
415
|
+
}
|
|
416
|
+
if (initState.refine.error) throw initState.refine.error;
|
|
417
|
+
return initState.refine.instance;
|
|
418
|
+
}
|
|
419
|
+
|
|
420
|
+
initState.refine.initializing = true;
|
|
421
|
+
|
|
422
|
+
try {
|
|
423
|
+
const templates = await loadTemplates();
|
|
424
|
+
initState.refine.instructions = templates.refineInstructions?.trim();
|
|
425
|
+
|
|
426
|
+
const instructionChars = initState.refine.instructions?.length || 0;
|
|
427
|
+
const instructionTokens = estimateTokens(initState.refine.instructions);
|
|
428
|
+
logger.debug({ component: 'refine', instructionChars, instructionTokens }, `Refine instructions: ${formatNumber(instructionChars)} chars (~${formatNumber(instructionTokens)} tokens)`);
|
|
429
|
+
|
|
430
|
+
const ai = new AITransformer({
|
|
431
|
+
vertexai: true,
|
|
432
|
+
project: GOOGLE_CLOUD_PROJECT,
|
|
433
|
+
maxOutputTokens: MAX_OUTPUT_TOKENS,
|
|
434
|
+
onlyJSON: true,
|
|
435
|
+
systemInstructions: null,
|
|
436
|
+
modelName: DEFAULT_MODEL,
|
|
437
|
+
chatConfig: { temperature: 0.1 },
|
|
438
|
+
logLevel: "none",
|
|
439
|
+
labels: {
|
|
440
|
+
"app": "dm4",
|
|
441
|
+
"component": "refine"
|
|
442
|
+
}
|
|
443
|
+
});
|
|
444
|
+
|
|
445
|
+
await ai.init();
|
|
446
|
+
|
|
447
|
+
initState.refine.instance = ai;
|
|
448
|
+
initState.refine.initialized = true;
|
|
449
|
+
|
|
450
|
+
// @ts-ignore
|
|
451
|
+
return ai;
|
|
452
|
+
} catch (error) {
|
|
453
|
+
initState.refine.error = error;
|
|
454
|
+
logger.error({ err: error }, 'Failed to initialize refine AI');
|
|
455
|
+
throw error;
|
|
456
|
+
} finally {
|
|
457
|
+
initState.refine.initializing = false;
|
|
458
|
+
}
|
|
459
|
+
}
|
|
460
|
+
|
|
461
|
+
/**
|
|
462
|
+
* Initialize funnels generation AI transformer
|
|
463
|
+
* @returns {Promise<AITransformer>} Initialized AI instance
|
|
464
|
+
*/
|
|
465
|
+
async function initFunnelsAI() {
|
|
466
|
+
if (initState.funnels.initialized) return initState.funnels.instance;
|
|
467
|
+
if (initState.funnels.initializing) {
|
|
468
|
+
while (initState.funnels.initializing) {
|
|
469
|
+
await new Promise(resolve => setTimeout(resolve, 100));
|
|
470
|
+
}
|
|
471
|
+
if (initState.funnels.error) throw initState.funnels.error;
|
|
472
|
+
return initState.funnels.instance;
|
|
473
|
+
}
|
|
474
|
+
|
|
475
|
+
initState.funnels.initializing = true;
|
|
476
|
+
|
|
477
|
+
try {
|
|
478
|
+
const templates = await loadTemplates();
|
|
479
|
+
initState.funnels.instructions = templates.funnelsInstructions?.trim();
|
|
480
|
+
|
|
481
|
+
const instructionChars = initState.funnels.instructions?.length || 0;
|
|
482
|
+
const instructionTokens = estimateTokens(initState.funnels.instructions);
|
|
483
|
+
logger.debug({ component: 'funnels', instructionChars, instructionTokens }, `Funnels instructions: ${formatNumber(instructionChars)} chars (~${formatNumber(instructionTokens)} tokens)`);
|
|
484
|
+
|
|
485
|
+
const ai = new AITransformer({
|
|
486
|
+
vertexai: true,
|
|
487
|
+
project: GOOGLE_CLOUD_PROJECT,
|
|
488
|
+
maxOutputTokens: MAX_OUTPUT_TOKENS,
|
|
489
|
+
onlyJSON: true,
|
|
490
|
+
systemInstructions: null,
|
|
491
|
+
modelName: DEFAULT_MODEL,
|
|
492
|
+
logLevel: "none",
|
|
493
|
+
labels: {
|
|
494
|
+
"app": "dm4",
|
|
495
|
+
"component": "funnels"
|
|
496
|
+
}
|
|
497
|
+
});
|
|
498
|
+
|
|
499
|
+
await ai.init();
|
|
500
|
+
|
|
501
|
+
initState.funnels.instance = ai;
|
|
502
|
+
initState.funnels.initialized = true;
|
|
503
|
+
|
|
504
|
+
// @ts-ignore
|
|
505
|
+
return ai;
|
|
506
|
+
} catch (error) {
|
|
507
|
+
initState.funnels.error = error;
|
|
508
|
+
logger.error({ err: error }, 'Failed to initialize funnels AI');
|
|
509
|
+
throw error;
|
|
510
|
+
} finally {
|
|
511
|
+
initState.funnels.initializing = false;
|
|
512
|
+
}
|
|
513
|
+
}
|
|
514
|
+
|
|
515
|
+
/**
|
|
516
|
+
* Pre-warm all AI instances (called at module load)
|
|
517
|
+
*/
|
|
518
|
+
async function prewarmAllAI() {
|
|
519
|
+
const startTime = Date.now();
|
|
520
|
+
|
|
521
|
+
try {
|
|
522
|
+
const results = await Promise.allSettled([
|
|
523
|
+
initSchemaAI(),
|
|
524
|
+
initHooksAI(),
|
|
525
|
+
initRefineAI(),
|
|
526
|
+
initFunnelsAI()
|
|
527
|
+
]);
|
|
528
|
+
|
|
529
|
+
const elapsed = Date.now() - startTime;
|
|
530
|
+
|
|
531
|
+
const failures = results.filter(r => r.status === 'rejected');
|
|
532
|
+
if (failures.length > 0) {
|
|
533
|
+
logger.warn({ failures: failures.length, elapsed }, `Pre-warming completed with ${failures.length} failures in ${elapsed}ms`);
|
|
534
|
+
failures.forEach((f, i) => logger.error({ component: ['Schema', 'Hooks', 'Refine', 'Funnels'][i], err: f.reason }));
|
|
535
|
+
} else {
|
|
536
|
+
logger.info({ elapsed }, `AI transformers pre-warmed in ${elapsed}ms`);
|
|
537
|
+
}
|
|
538
|
+
} catch (error) {
|
|
539
|
+
logger.error({ err: error }, 'Critical error during pre-warming');
|
|
540
|
+
}
|
|
541
|
+
}
|
|
542
|
+
|
|
543
|
+
/**
|
|
544
|
+
* Generate AI schema using cached transformer
|
|
545
|
+
* @param {object} params - Parameters object
|
|
546
|
+
* @param {string} params.prompt - User's prompt
|
|
547
|
+
* @param {string} [params.user_id] - User ID for tracking
|
|
548
|
+
* @param {string} [params.model] - Model to use
|
|
549
|
+
* @returns {Promise<object>} Generated schema
|
|
550
|
+
*/
|
|
551
|
+
export async function generateAISchema(params) {
|
|
552
|
+
const { prompt, user_id, model } = params;
|
|
44
553
|
if (!prompt) throw new Error("Please provide a prompt");
|
|
45
|
-
let INSTRUCTIONS = await u.load('./lib/templates/schema-instructions.txt', false);
|
|
46
|
-
const TYPES = await u.load('./lib/templates/abbreviated.d.ts', false);
|
|
47
|
-
const VERBOSE_SCHEMA_FILE = await u.load('./lib/templates/verbose-schema.js', false);
|
|
48
|
-
const VERBOSE_SCHEMA = VERBOSE_SCHEMA_FILE.split(`//SPLIT HERE`).pop()?.trim() || ``;
|
|
49
|
-
INSTRUCTIONS = INSTRUCTIONS
|
|
50
|
-
.replace(/<TYPES>/g, TYPES)
|
|
51
|
-
.replace(/<VERBOSE_SCHEMA>/g, VERBOSE_SCHEMA);
|
|
52
554
|
|
|
53
|
-
const ai =
|
|
54
|
-
apiKey: API_KEY,
|
|
55
|
-
onlyJSON: false,
|
|
56
|
-
systemInstructions: INSTRUCTIONS?.trim(),
|
|
57
|
-
modelName: "gemini-2.5-pro",
|
|
555
|
+
const ai = await initSchemaAI();
|
|
58
556
|
|
|
557
|
+
const selectedModel = SUPPORTED_MODELS.includes(model) ? model : DEFAULT_MODEL;
|
|
558
|
+
ai.modelName = selectedModel;
|
|
59
559
|
|
|
60
|
-
}
|
|
61
|
-
await ai.init();
|
|
62
|
-
const response = await ai.message(prompt);
|
|
560
|
+
const fullPrompt = `${initState.schema.instructions}\n\n---\n\nUSER REQUEST:\n${prompt}`;
|
|
63
561
|
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
// }
|
|
562
|
+
const startTime = Date.now();
|
|
563
|
+
logger.debug({ model: selectedModel }, 'Starting AI schema generation');
|
|
67
564
|
|
|
68
|
-
|
|
565
|
+
let response = null;
|
|
566
|
+
let success = false;
|
|
567
|
+
let errorMessage = null;
|
|
568
|
+
|
|
569
|
+
try {
|
|
570
|
+
// @ts-ignore
|
|
571
|
+
response = await ai.transformWithValidation(
|
|
572
|
+
{ prompt: fullPrompt },
|
|
573
|
+
{ maxRetries: 2 },
|
|
574
|
+
validateSchemaOutput
|
|
575
|
+
);
|
|
576
|
+
success = true;
|
|
577
|
+
} catch (error) {
|
|
578
|
+
errorMessage = error.message;
|
|
579
|
+
throw error;
|
|
580
|
+
} finally {
|
|
581
|
+
const duration_ms = Date.now() - startTime;
|
|
582
|
+
const usage = ai.getLastUsage?.() || null;
|
|
583
|
+
|
|
584
|
+
logAICallStructured({
|
|
585
|
+
component: 'schema', prompt, response, duration_ms,
|
|
586
|
+
success, error: errorMessage, model: selectedModel, usage
|
|
587
|
+
});
|
|
69
588
|
|
|
589
|
+
trackAIJob({
|
|
590
|
+
component: 'schema', prompt, response, duration_ms,
|
|
591
|
+
success, error: errorMessage, usage, user_id
|
|
592
|
+
});
|
|
593
|
+
}
|
|
594
|
+
|
|
595
|
+
return response;
|
|
70
596
|
}
|
|
71
597
|
|
|
72
598
|
/**
 * Generate AI hooks using cached transformer
 * @param {object} params - Parameters object
 * @param {string} params.prompt - User's description of desired trends
 * @param {object} params.currentSchema - The existing dungeon schema
 * @param {string} [params.user_id] - User ID for tracking
 * @param {string} [params.model] - Model to use
 * @returns {Promise<string>} Generated hook function code
 */
export async function generateAIHooks(params) {
  const { prompt, currentSchema, user_id, model } = params;
  if (!prompt) throw new Error("Please provide a prompt describing the trends you want");
  if (!currentSchema) throw new Error("Please provide the current schema");

  // Obtain the (cached) hooks transformer instance.
  const ai = await initHooksAI();

  // Unknown or missing model names silently fall back to the default.
  const selectedModel = SUPPORTED_MODELS.includes(model) ? model : DEFAULT_MODEL;
  ai.modelName = selectedModel;

  // Inline the caller's schema into the instruction template.
  // NOTE(review): assumes initState.hooks.instructions was populated by initHooksAI — confirm.
  const schemaString = JSON.stringify(currentSchema, null, 2);
  const instructionsWithSchema = initState.hooks.instructions
    .replace(/<CURRENT_SCHEMA>/g, schemaString);

  const fullPrompt = `${instructionsWithSchema}\n\n---\n\nUSER REQUEST:\n${prompt}`;

  const startTime = Date.now();
  logger.debug({ model: selectedModel }, 'Starting AI hooks generation');

  let response = null;
  let hookCode = null;
  let success = false;
  let errorMessage = null;

  try {
    // @ts-ignore
    response = /** @type {string | {hook: string}} */ (await ai.message({ prompt: fullPrompt }));

    // The model may answer with raw code, or with an object wrapping it under `hook`.
    if (typeof response === 'string') {
      hookCode = response.trim();
    } else if (response && typeof response === 'object' && 'hook' in response) {
      hookCode = response.hook.trim();
    } else {
      throw new Error('AI did not return a valid hook function');
    }

    // Strip any markdown code fences the model wrapped around the code.
    hookCode = hookCode
      .replace(/^```javascript\s*/g, '')
      .replace(/^```js\s*/g, '')
      .replace(/^```\s*/g, '')
      .replace(/```$/g, '')
      .trim();

    await validateHookOutput(hookCode);

    success = true;
    logger.info('Hook function validated successfully');
  } catch (error) {
    errorMessage = error.message;
    // Rethrow after recording the message; telemetry below still runs in finally.
    throw error;
  } finally {
    // Emit structured logging + job tracking on both success and failure paths.
    const duration_ms = Date.now() - startTime;
    const usage = ai.getLastUsage?.() || null;

    logAICallStructured({
      component: 'hooks', prompt, response: hookCode, duration_ms,
      success, error: errorMessage, model: selectedModel, usage
    });

    trackAIJob({
      component: 'hooks', prompt, response: hookCode, duration_ms,
      success, error: errorMessage, usage, user_id
    });
  }

  return hookCode;
}
|
|
674
|
+
|
|
675
|
+
/**
 * Refine an existing dungeon schema via the cached refine transformer.
 * @param {object} params - Parameters object
 * @param {string} params.prompt - User's description of changes
 * @param {object} params.currentSchema - The existing dungeon schema to refine
 * @param {string} [params.user_id] - User ID for tracking
 * @param {string} [params.model] - Model to use
 * @returns {Promise<object>} Refined dungeon schema
 */
export async function generateAIRefine(params) {
  const { prompt, currentSchema, user_id, model } = params;
  if (!prompt) throw new Error("Please provide a description of the changes you want");
  if (!currentSchema) throw new Error("Please provide the current schema to refine");

  const transformer = await initRefineAI();

  // Unrecognized model names fall back to the default.
  const chosenModel = SUPPORTED_MODELS.includes(model) ? model : DEFAULT_MODEL;
  transformer.modelName = chosenModel;

  // Render the instruction template with the caller's schema and the type templates.
  const templates = await loadTemplates();
  const renderedInstructions = initState.refine.instructions
    .replace(/<CURRENT_SCHEMA>/g, JSON.stringify(currentSchema, null, 2))
    .replace(/<TYPES>/g, templates.types);
  const composedPrompt = `${renderedInstructions}\n\n---\n\nUSER REQUEST:\n${prompt}`;

  const startedAt = Date.now();
  logger.debug({ model: chosenModel }, 'Starting AI refine operation');

  let result = null;
  let succeeded = false;
  let failureReason = null;

  try {
    // @ts-ignore
    result = await transformer.transformWithValidation(
      { prompt: composedPrompt },
      { maxRetries: 2 },
      validateSchemaOutput
    );
    succeeded = true;
  } catch (err) {
    failureReason = err.message;
    // Record the message, then rethrow; telemetry in finally still fires.
    throw err;
  } finally {
    const duration_ms = Date.now() - startedAt;
    const usage = transformer.getLastUsage?.() || null;

    logAICallStructured({
      component: 'refine',
      prompt,
      response: result,
      duration_ms,
      success: succeeded,
      error: failureReason,
      model: chosenModel,
      usage
    });

    trackAIJob({
      component: 'refine',
      prompt,
      response: result,
      duration_ms,
      success: succeeded,
      error: failureReason,
      usage,
      user_id
    });
  }

  return result;
}
|
|
737
|
+
|
|
738
|
+
/**
 * Generate AI funnels via the cached funnels transformer.
 * @param {object} params - Parameters object
 * @param {string} params.prompt - User's description of user journeys
 * @param {object} params.currentSchema - The existing dungeon schema
 * @param {string} [params.user_id] - User ID for tracking
 * @param {string} [params.model] - Model to use
 * @returns {Promise<object>} Generated funnels object
 */
export async function generateAIFunnels(params) {
  const { prompt, currentSchema, user_id, model } = params;
  if (!prompt) throw new Error("Please provide a prompt describing user journeys");
  if (!currentSchema) throw new Error("Please provide the current schema");

  const transformer = await initFunnelsAI();

  // Unrecognized model names fall back to the default.
  const chosenModel = SUPPORTED_MODELS.includes(model) ? model : DEFAULT_MODEL;
  transformer.modelName = chosenModel;

  // Render the instruction template with the caller's schema inlined.
  const renderedInstructions = initState.funnels.instructions
    .replace(/<CURRENT_SCHEMA>/g, JSON.stringify(currentSchema, null, 2));
  const composedPrompt = `${renderedInstructions}\n\n---\n\nUSER REQUEST:\n${prompt}`;

  const startedAt = Date.now();
  logger.debug({ model: chosenModel }, 'Starting AI funnels generation');

  // Validator is built from the caller's schema and used by the retry loop below.
  const validateFunnels = createFunnelsValidator(currentSchema);

  let result = null;
  let succeeded = false;
  let failureReason = null;

  try {
    // @ts-ignore
    result = await transformer.transformWithValidation(
      { prompt: composedPrompt },
      { maxRetries: 2 },
      validateFunnels
    );
    succeeded = true;
  } catch (err) {
    failureReason = err.message;
    // Record the message, then rethrow; telemetry in finally still fires.
    throw err;
  } finally {
    const duration_ms = Date.now() - startedAt;
    const usage = transformer.getLastUsage?.() || null;

    logAICallStructured({
      component: 'funnels',
      prompt,
      response: result,
      duration_ms,
      success: succeeded,
      error: failureReason,
      model: chosenModel,
      usage
    });

    trackAIJob({
      component: 'funnels',
      prompt,
      response: result,
      duration_ms,
      success: succeeded,
      error: failureReason,
      usage,
      user_id
    });
  }

  return result;
}
|
|
800
|
+
|
|
801
|
+
/**
 * Get initialization status for monitoring
 * @returns {object} Current initialization state of all AI instances
 */
export function getInitStatus() {
  // Collapse each AI instance's init state to a { ready, error } summary.
  const summarize = ({ initialized, error }) => ({
    ready: initialized,
    error: error?.message
  });

  return {
    schema: summarize(initState.schema),
    hooks: summarize(initState.hooks),
    refine: summarize(initState.refine),
    funnels: summarize(initState.funnels)
  };
}
|
|
813
|
+
|
|
814
|
+
// Start pre-warming immediately when module loads (container cold-start).
// Fire-and-forget: failures are logged but never rethrown, so importing this
// module cannot fail because of pre-warm errors. Skipped entirely under test.
if (NODE_ENV !== 'test') {
  prewarmAllAI().catch(error => {
    logger.error({ err: error }, 'Failed to pre-warm AI transformers');
  });
}
|
|
820
|
+
|
|
821
|
+
/**
 * Generic AI question
 * @param {string} question - The question to ask
 * @param {string} [user_id] - User ID for tracking
 * @returns {Promise<string>} The AI response
 * @throws {Error} If no question is provided
 */
export async function ask(question, user_id = null) {
  // Validate up front, consistent with the generate* entry points. This also
  // guarantees `prompt` is a string before the finally block calls
  // prompt.substring(); previously a nullish question caused a TypeError in
  // finally that masked the real failure from ai.message().
  if (!question) throw new Error("Please provide a question");
  const prompt = question;

  const ai = new AITransformer({
    vertexai: true,
    project: GOOGLE_CLOUD_PROJECT,
    maxOutputTokens: MAX_OUTPUT_TOKENS,
    onlyJSON: false,
    responseSchema: {
      type: "string"
    },
    modelName: "gemini-2.5-flash-lite",
    labels: {
      "app": "dm4",
      "component": "generic"
    }
  });

  // NOTE: duration_ms includes ai.init() time (startTime is captured before init).
  const startTime = Date.now();
  await ai.init();

  let response = null;
  let success = false;
  let errorMessage = null;

  try {
    response = await ai.message({ prompt });
    success = true;
  } catch (error) {
    errorMessage = error.message;
    // Rethrow after recording the message; telemetry below still runs in finally.
    throw error;
  } finally {
    const duration_ms = Date.now() - startTime;
    const usage = ai.getLastUsage?.() || null;

    const logData = {
      component: 'ai-generic',
      prompt_preview: prompt.substring(0, 200),
      prompt_length: prompt.length,
      output_length: response?.toString()?.length || 0,
      duration_ms,
      duration_human: `${(duration_ms / 1000).toFixed(2)} seconds`,
      user_id: user_id || 'anonymous',
      success,
      error: errorMessage,
      // Token/usage fields only when the transformer reported usage.
      ...(usage && {
        promptTokens: usage.promptTokens,
        responseTokens: usage.responseTokens,
        totalTokens: usage.totalTokens,
        attempts: usage.attempts,
        modelVersion: usage.modelVersion,
        requestedModel: usage.requestedModel
      }),
    };

    if (success) {
      logger.info(logData, `AI ask completed in ${duration_ms}ms`);
    } else {
      logger.error(logData, `AI ask failed after ${duration_ms}ms`);
    }

    trackAIJob({
      component: 'generic', prompt, response: response?.toString(),
      duration_ms, success, error: errorMessage, usage, user_id
    });
  }

  return response?.toString()?.trim();
}
|
|
895
|
+
|
|
896
|
+
// Default export — generateAISchema (presumably defined earlier in this module; confirm).
export default generateAISchema;
|