make-mp-data 3.0.1 → 3.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/dungeons/adspend.js +35 -1
  2. package/dungeons/anon.js +25 -1
  3. package/dungeons/{array-of-object-loopup.js → array-of-object-lookup.js} +28 -8
  4. package/dungeons/benchmark-heavy.js +2 -2
  5. package/dungeons/benchmark-light.js +2 -2
  6. package/dungeons/big.js +2 -2
  7. package/dungeons/business.js +59 -12
  8. package/dungeons/complex.js +34 -1
  9. package/dungeons/copilot.js +1 -1
  10. package/dungeons/{harness/harness-education.js → education.js} +29 -12
  11. package/dungeons/experiments.js +15 -2
  12. package/dungeons/{harness/harness-fintech.js → fintech.js} +8 -8
  13. package/dungeons/foobar.js +33 -1
  14. package/dungeons/{harness/harness-food.js → food.js} +7 -4
  15. package/dungeons/funnels.js +38 -1
  16. package/dungeons/gaming.js +25 -5
  17. package/dungeons/media.js +861 -271
  18. package/dungeons/mil.js +29 -2
  19. package/dungeons/mirror.js +33 -1
  20. package/dungeons/{kurby.js → retention-cadence.js} +1 -1
  21. package/dungeons/{harness/harness-gaming.js → rpg.js} +5 -5
  22. package/dungeons/sanity.js +31 -2
  23. package/dungeons/{harness/harness-sass.js → sass.js} +2 -2
  24. package/dungeons/scd.js +46 -1
  25. package/dungeons/simple.js +1 -1
  26. package/dungeons/{harness/harness-social.js → social.js} +2 -2
  27. package/dungeons/streaming.js +373 -0
  28. package/dungeons/strict-event-test.js +1 -1
  29. package/dungeons/student-teacher.js +18 -5
  30. package/dungeons/text-generation.js +38 -1
  31. package/dungeons/too-big-events.js +38 -1
  32. package/dungeons/{userAgent.js → user-agent.js} +21 -1
  33. package/entry.js +5 -4
  34. package/lib/utils/logger.js +0 -4
  35. package/package.json +1 -4
  36. package/dungeons/ai-chat-analytics-ed.js +0 -275
  37. package/dungeons/clinch-agi.js +0 -632
  38. package/dungeons/ecommerce-store.js +0 -0
  39. package/dungeons/harness/harness-media.js +0 -961
  40. package/dungeons/money2020-ed-also.js +0 -277
  41. package/dungeons/money2020-ed.js +0 -580
  42. package/dungeons/uday-schema.json +0 -220
  43. package/lib/templates/funnels-instructions.txt +0 -272
  44. package/lib/templates/hook-examples.json +0 -187
  45. package/lib/templates/hooks-instructions.txt +0 -721
  46. package/lib/templates/refine-instructions.txt +0 -485
  47. package/lib/templates/schema-instructions.txt +0 -285
  48. package/lib/utils/ai.js +0 -896
  49. package/lib/utils/mixpanel.js +0 -101
  50. package/lib/utils/project.js +0 -167
package/lib/utils/ai.js DELETED
@@ -1,896 +0,0 @@
1
- /**
2
- * AI Cache Module - Pre-warmed AI transformer instances for Cloud Run
3
- *
4
- * This module follows Google Cloud Run best practices for caching expensive objects.
5
- * AI transformers are initialized at module load time (container cold-start) and
6
- * reused across all requests to minimize latency.
7
- */
8
-
9
- import AITransformer from 'ak-gemini';
10
- import * as u from "ak-tools";
11
- import 'dotenv/config';
12
- import { aiLogger as logger } from './logger.js';
13
- import { trackAIJob } from './mixpanel.js';
14
-
15
- const { NODE_ENV = "unknown", GOOGLE_CLOUD_PROJECT = "mixpanel-gtm-training" } = process.env;
16
- // Uses Vertex AI with Application Default Credentials by default
17
- // GEMINI_API_KEY is no longer required
18
-
19
- const MAX_OUTPUT_TOKENS = 50_000;
20
- const DEFAULT_MODEL = 'gemini-2.5-flash';
21
-
22
- // Supported models for validation
23
- const SUPPORTED_MODELS = [
24
- 'gemini-2.5-flash',
25
- 'gemini-2.5-pro',
26
- 'gemini-3-flash-preview',
27
- 'gemini-3-pro-preview'
28
- ];
29
-
30
- /**
31
- * Format a number with commas for readability (e.g., 31395 -> "31,395")
32
- * @param {number} num - Number to format
33
- * @returns {string} Formatted number string
34
- */
35
- function formatNumber(num) {
36
- return num.toLocaleString('en-US');
37
- }
38
-
39
- /**
40
- * Estimate token count from text using Google's rough approximation
41
- * (1 token ~= 4 characters, 100 tokens ~= 60-80 English words)
42
- * @param {string} text - Text to estimate tokens for
43
- * @returns {number} Estimated token count
44
- */
45
- function estimateTokens(text) {
46
- if (!text) return 0;
47
- return Math.ceil(text.length / 4);
48
- }
49
-
50
- /**
51
- * Log structured AI call metrics
52
- * In production, outputs JSON for Cloud Logging ingestion
53
- * @param {object} params - Logging parameters
54
- * @param {string} params.component - AI component name (schema, hooks, refine, funnels)
55
- * @param {string} params.prompt - Original user prompt
56
- * @param {any} params.response - AI response
57
- * @param {number} params.duration_ms - Call duration in milliseconds
58
- * @param {boolean} params.success - Whether the call succeeded
59
- * @param {string} [params.error] - Error message if failed
60
- * @param {string} [params.model] - Model used
61
- * @param {object} [params.usage] - Token usage from ai.getLastUsage()
62
- */
63
- function logAICallStructured(params) {
64
- const { component, prompt, response, duration_ms, success, error, model, usage } = params;
65
-
66
- const structuredLog = {
67
- message: "AI Response",
68
- component,
69
- model: model || DEFAULT_MODEL,
70
- prompt_preview: prompt?.substring(0, 200) || '',
71
- prompt_length: prompt?.length || 0,
72
- output_length: response ? JSON.stringify(response).length : 0,
73
- duration_ms,
74
- duration_human: `${(duration_ms / 1000).toFixed(2)} seconds`,
75
- success,
76
- error: error || null,
77
- environment: NODE_ENV,
78
- // Token usage from ak-gemini library
79
- ...(usage && {
80
- promptTokens: usage.promptTokens,
81
- responseTokens: usage.responseTokens,
82
- totalTokens: usage.totalTokens,
83
- attempts: usage.attempts,
84
- modelVersion: usage.modelVersion,
85
- requestedModel: usage.requestedModel
86
- })
87
- };
88
-
89
- // In production, output structured JSON for Cloud Logging
90
- if (NODE_ENV === 'production') {
91
- console.log(JSON.stringify(structuredLog));
92
- } else {
93
- // In dev/test, use the logger with structured data
94
- if (success) {
95
- logger.debug(structuredLog, `AI ${component} call completed`);
96
- } else {
97
- logger.error(structuredLog, `AI ${component} call failed`);
98
- }
99
- }
100
-
101
- return structuredLog;
102
- }
103
-
104
- // ========== VALIDATION FUNCTIONS FOR SELF-HEALING ==========
105
-
106
- /**
107
- * Validates AI-generated schema meets minimum requirements.
108
- * Used with transformWithValidation for self-healing.
109
- * @param {object} payload - AI response payload
110
- * @returns {Promise<object>} Validated payload
111
- * @throws {Error} If validation fails (triggers AI retry)
112
- */
113
- async function validateSchemaOutput(payload) {
114
- const errors = [];
115
-
116
- if (!Array.isArray(payload.events) || payload.events.length < 8) {
117
- errors.push(`events: expected array with at least 8 items, got ${payload.events?.length || 0}`);
118
- }
119
-
120
- if (!Array.isArray(payload.funnels) || payload.funnels.length < 3) {
121
- errors.push(`funnels: expected array with at least 3 items, got ${payload.funnels?.length || 0}`);
122
- }
123
-
124
- if (!payload.superProps || typeof payload.superProps !== 'object' ||
125
- Array.isArray(payload.superProps) || Object.keys(payload.superProps).length < 2) {
126
- errors.push(`superProps: expected object with at least 2 keys, got ${Object.keys(payload.superProps || {}).length}`);
127
- }
128
-
129
- if (!payload.userProps || typeof payload.userProps !== 'object' ||
130
- Array.isArray(payload.userProps) || Object.keys(payload.userProps).length < 6) {
131
- errors.push(`userProps: expected object with at least 6 keys, got ${Object.keys(payload.userProps || {}).length}`);
132
- }
133
-
134
- if (Array.isArray(payload.events)) {
135
- payload.events.forEach((event, i) => {
136
- if (!event.event || typeof event.event !== 'string') {
137
- errors.push(`events[${i}]: missing or invalid 'event' name`);
138
- }
139
- });
140
- }
141
-
142
- if (Array.isArray(payload.funnels)) {
143
- payload.funnels.forEach((funnel, i) => {
144
- if (!Array.isArray(funnel.sequence) || funnel.sequence.length < 2) {
145
- errors.push(`funnels[${i}]: sequence must have at least 2 events`);
146
- }
147
- if (typeof funnel.conversionRate !== 'number' ||
148
- funnel.conversionRate < 0 || funnel.conversionRate > 100) {
149
- errors.push(`funnels[${i}]: conversionRate must be integer between 0 and 100`);
150
- }
151
- });
152
- }
153
-
154
- if (errors.length > 0) {
155
- throw new Error(`Schema validation failed:\n${errors.join('\n')}\n\nPlease fix these issues and return the COMPLETE corrected schema (all fields: events, funnels, superProps, userProps, etc).`);
156
- }
157
-
158
- return payload;
159
- }
160
-
161
- /**
162
- * Creates a funnels validator with schema context for semantic validation.
163
- * @param {object} schema - The current schema (for event name validation)
164
- * @returns {(payload: any) => Promise<any>} Validator function
165
- */
166
- function createFunnelsValidator(schema) {
167
- return async function validateFunnelsOutput(payload) {
168
- const errors = [];
169
-
170
- if (!Array.isArray(payload.funnels) || payload.funnels.length < 2) {
171
- errors.push(`funnels: expected array with at least 2 items, got ${payload.funnels?.length || 0}`);
172
- }
173
-
174
- const validEventNames = new Set(
175
- (schema?.events || []).map(e => e.event).filter(Boolean)
176
- );
177
-
178
- if (Array.isArray(payload.funnels)) {
179
- payload.funnels.forEach((funnel, i) => {
180
- if (!Array.isArray(funnel.sequence) || funnel.sequence.length < 2) {
181
- errors.push(`funnels[${i}]: sequence must have at least 2 events`);
182
- }
183
- if (typeof funnel.conversionRate !== 'number' ||
184
- funnel.conversionRate < 0 || funnel.conversionRate > 100) {
185
- errors.push(`funnels[${i}]: conversionRate must be integer between 0 and 100`);
186
- }
187
-
188
- if (Array.isArray(funnel.sequence) && validEventNames.size > 0) {
189
- funnel.sequence.forEach((eventName, j) => {
190
- if (!validEventNames.has(eventName)) {
191
- const available = [...validEventNames].slice(0, 5).join(', ');
192
- errors.push(`funnels[${i}].sequence[${j}]: "${eventName}" is not a valid event. Available: ${available}...`);
193
- }
194
- });
195
- }
196
- });
197
- }
198
-
199
- if (errors.length > 0) {
200
- throw new Error(`Funnels validation failed:\n${errors.join('\n')}\n\nPlease fix these issues and return the COMPLETE corrected funnels object with all funnels.`);
201
- }
202
-
203
- return payload;
204
- };
205
- }
206
-
207
- /**
208
- * Validates AI-generated hook code.
209
- * @param {string} code - Hook function code
210
- * @returns {Promise<string>} Validated code
211
- * @throws {Error} If validation fails
212
- */
213
- async function validateHookOutput(code) {
214
- const errors = [];
215
-
216
- if (typeof code !== 'string' || !code.trim()) {
217
- throw new Error('Hook must be a non-empty string. Please return only the function code starting with: function(record, type, meta) { ... }');
218
- }
219
-
220
- const trimmed = code.trim();
221
-
222
- if (!trimmed.startsWith('function(record, type, meta)') &&
223
- !trimmed.startsWith('function (record, type, meta)')) {
224
- errors.push('Hook must start with: function(record, type, meta)');
225
- }
226
-
227
- if (!trimmed.includes('return record')) {
228
- errors.push('Hook must include "return record" statement');
229
- }
230
-
231
- try {
232
- new Function('return ' + trimmed);
233
- } catch (e) {
234
- errors.push(`Invalid JavaScript syntax: ${e.message}`);
235
- }
236
-
237
- if (errors.length > 0) {
238
- throw new Error(`Hook validation failed:\n${errors.join('\n')}\n\nPlease return the COMPLETE corrected hook function code only (no markdown, no explanations).`);
239
- }
240
-
241
- return code;
242
- }
243
-
244
- // Initialization state tracking
245
- const initState = {
246
- schema: { initialized: false, initializing: false, instance: null, error: null, instructions: null },
247
- hooks: { initialized: false, initializing: false, instance: null, error: null, instructions: null },
248
- refine: { initialized: false, initializing: false, instance: null, error: null, instructions: null },
249
- funnels: { initialized: false, initializing: false, instance: null, error: null, instructions: null }
250
- };
251
-
252
- let cachedTemplates = null;
253
-
254
- /**
255
- * Load and cache instruction templates
256
- * @returns {Promise<object>} Cached templates
257
- */
258
- async function loadTemplates() {
259
- if (cachedTemplates) return cachedTemplates;
260
-
261
- const [INSTRUCTIONS, TYPES, HOOKS_INSTRUCTIONS, REFINE_INSTRUCTIONS, HOOK_EXAMPLES, FUNNELS_INSTRUCTIONS] = await Promise.all([
262
- u.load('./lib/templates/schema-instructions.txt', false),
263
- u.load('./lib/templates/schema.d.ts', false),
264
- u.load('./lib/templates/hooks-instructions.txt', false),
265
- u.load('./lib/templates/refine-instructions.txt', false),
266
- u.load('./lib/templates/hook-examples.json', true),
267
- u.load('./lib/templates/funnels-instructions.txt', false)
268
- ]);
269
-
270
- const transformedExamples = HOOK_EXAMPLES.examples.map(ex => ({
271
- PROMPT: { prompt: ex.prompt },
272
- ANSWER: { hook: ex.response },
273
- EXPLANATION: ex.useCase,
274
- CONTEXT: {
275
- hookTypes: ex.hookTypes,
276
- patterns: ex.patterns,
277
- complexity: ex.complexity
278
- }
279
- }));
280
-
281
- cachedTemplates = {
282
- schemaInstructions: INSTRUCTIONS.replace(/<TYPES>/g, TYPES),
283
- hooksInstructions: HOOKS_INSTRUCTIONS,
284
- refineInstructions: REFINE_INSTRUCTIONS.replace(/<TYPES>/g, TYPES),
285
- funnelsInstructions: FUNNELS_INSTRUCTIONS,
286
- types: TYPES,
287
- hookExamples: transformedExamples
288
- };
289
-
290
- return cachedTemplates;
291
- }
292
-
293
- /**
294
- * Initialize schema generation AI transformer
295
- * @returns {Promise<AITransformer>} Initialized AI instance
296
- */
297
- async function initSchemaAI() {
298
- if (initState.schema.initialized) return initState.schema.instance;
299
- if (initState.schema.initializing) {
300
- while (initState.schema.initializing) {
301
- await new Promise(resolve => setTimeout(resolve, 100));
302
- }
303
- if (initState.schema.error) throw initState.schema.error;
304
- return initState.schema.instance;
305
- }
306
-
307
- initState.schema.initializing = true;
308
-
309
- try {
310
- const templates = await loadTemplates();
311
- initState.schema.instructions = templates.schemaInstructions?.trim();
312
-
313
- const instructionChars = initState.schema.instructions?.length || 0;
314
- const instructionTokens = estimateTokens(initState.schema.instructions);
315
- logger.debug({ component: 'schema', instructionChars, instructionTokens }, `Schema instructions: ${formatNumber(instructionChars)} chars (~${formatNumber(instructionTokens)} tokens)`);
316
-
317
- const ai = new AITransformer({
318
- vertexai: true,
319
- maxOutputTokens: MAX_OUTPUT_TOKENS,
320
- project: GOOGLE_CLOUD_PROJECT,
321
- onlyJSON: true,
322
- systemInstructions: null,
323
- modelName: DEFAULT_MODEL,
324
- logLevel: "none",
325
- labels: {
326
- "app": "dm4",
327
- "component": "schema"
328
- }
329
- });
330
-
331
- await ai.init();
332
-
333
- initState.schema.instance = ai;
334
- initState.schema.initialized = true;
335
-
336
- // @ts-ignore
337
- return ai;
338
- } catch (error) {
339
- initState.schema.error = error;
340
- logger.error({ err: error }, 'Failed to initialize schema AI');
341
- throw error;
342
- } finally {
343
- initState.schema.initializing = false;
344
- }
345
- }
346
-
347
- /**
348
- * Initialize hooks generation AI transformer
349
- * @returns {Promise<AITransformer>} Initialized AI instance
350
- */
351
- async function initHooksAI() {
352
- if (initState.hooks.initialized) return initState.hooks.instance;
353
- if (initState.hooks.initializing) {
354
- while (initState.hooks.initializing) {
355
- await new Promise(resolve => setTimeout(resolve, 100));
356
- }
357
- if (initState.hooks.error) throw initState.hooks.error;
358
- return initState.hooks.instance;
359
- }
360
-
361
- initState.hooks.initializing = true;
362
-
363
- try {
364
- const templates = await loadTemplates();
365
- initState.hooks.instructions = templates.hooksInstructions?.trim();
366
-
367
- const instructionChars = initState.hooks.instructions?.length || 0;
368
- const instructionTokens = estimateTokens(initState.hooks.instructions);
369
- logger.debug({ component: 'hooks', instructionChars, instructionTokens }, `Hooks instructions: ${formatNumber(instructionChars)} chars (~${formatNumber(instructionTokens)} tokens)`);
370
-
371
- const ai = new AITransformer({
372
- vertexai: true,
373
- project: GOOGLE_CLOUD_PROJECT,
374
- maxOutputTokens: MAX_OUTPUT_TOKENS,
375
- onlyJSON: false,
376
- systemInstructions: null,
377
- modelName: DEFAULT_MODEL,
378
- logLevel: "none",
379
- exampleData: templates.hookExamples,
380
- promptKey: 'PROMPT',
381
- answerKey: 'ANSWER',
382
- contextKey: 'CONTEXT',
383
- explanationKey: 'EXPLANATION',
384
- labels: {
385
- "app": "dm4",
386
- "component": "hooks"
387
- }
388
- });
389
-
390
- await ai.init();
391
-
392
- initState.hooks.instance = ai;
393
- initState.hooks.initialized = true;
394
-
395
- // @ts-ignore
396
- return ai;
397
- } catch (error) {
398
- initState.hooks.error = error;
399
- logger.error({ err: error }, 'Failed to initialize hooks AI');
400
- throw error;
401
- } finally {
402
- initState.hooks.initializing = false;
403
- }
404
- }
405
-
406
- /**
407
- * Initialize refine AI transformer
408
- * @returns {Promise<AITransformer>} Initialized AI instance
409
- */
410
- async function initRefineAI() {
411
- if (initState.refine.initialized) return initState.refine.instance;
412
- if (initState.refine.initializing) {
413
- while (initState.refine.initializing) {
414
- await new Promise(resolve => setTimeout(resolve, 100));
415
- }
416
- if (initState.refine.error) throw initState.refine.error;
417
- return initState.refine.instance;
418
- }
419
-
420
- initState.refine.initializing = true;
421
-
422
- try {
423
- const templates = await loadTemplates();
424
- initState.refine.instructions = templates.refineInstructions?.trim();
425
-
426
- const instructionChars = initState.refine.instructions?.length || 0;
427
- const instructionTokens = estimateTokens(initState.refine.instructions);
428
- logger.debug({ component: 'refine', instructionChars, instructionTokens }, `Refine instructions: ${formatNumber(instructionChars)} chars (~${formatNumber(instructionTokens)} tokens)`);
429
-
430
- const ai = new AITransformer({
431
- vertexai: true,
432
- project: GOOGLE_CLOUD_PROJECT,
433
- maxOutputTokens: MAX_OUTPUT_TOKENS,
434
- onlyJSON: true,
435
- systemInstructions: null,
436
- modelName: DEFAULT_MODEL,
437
- chatConfig: { temperature: 0.1 },
438
- logLevel: "none",
439
- labels: {
440
- "app": "dm4",
441
- "component": "refine"
442
- }
443
- });
444
-
445
- await ai.init();
446
-
447
- initState.refine.instance = ai;
448
- initState.refine.initialized = true;
449
-
450
- // @ts-ignore
451
- return ai;
452
- } catch (error) {
453
- initState.refine.error = error;
454
- logger.error({ err: error }, 'Failed to initialize refine AI');
455
- throw error;
456
- } finally {
457
- initState.refine.initializing = false;
458
- }
459
- }
460
-
461
- /**
462
- * Initialize funnels generation AI transformer
463
- * @returns {Promise<AITransformer>} Initialized AI instance
464
- */
465
- async function initFunnelsAI() {
466
- if (initState.funnels.initialized) return initState.funnels.instance;
467
- if (initState.funnels.initializing) {
468
- while (initState.funnels.initializing) {
469
- await new Promise(resolve => setTimeout(resolve, 100));
470
- }
471
- if (initState.funnels.error) throw initState.funnels.error;
472
- return initState.funnels.instance;
473
- }
474
-
475
- initState.funnels.initializing = true;
476
-
477
- try {
478
- const templates = await loadTemplates();
479
- initState.funnels.instructions = templates.funnelsInstructions?.trim();
480
-
481
- const instructionChars = initState.funnels.instructions?.length || 0;
482
- const instructionTokens = estimateTokens(initState.funnels.instructions);
483
- logger.debug({ component: 'funnels', instructionChars, instructionTokens }, `Funnels instructions: ${formatNumber(instructionChars)} chars (~${formatNumber(instructionTokens)} tokens)`);
484
-
485
- const ai = new AITransformer({
486
- vertexai: true,
487
- project: GOOGLE_CLOUD_PROJECT,
488
- maxOutputTokens: MAX_OUTPUT_TOKENS,
489
- onlyJSON: true,
490
- systemInstructions: null,
491
- modelName: DEFAULT_MODEL,
492
- logLevel: "none",
493
- labels: {
494
- "app": "dm4",
495
- "component": "funnels"
496
- }
497
- });
498
-
499
- await ai.init();
500
-
501
- initState.funnels.instance = ai;
502
- initState.funnels.initialized = true;
503
-
504
- // @ts-ignore
505
- return ai;
506
- } catch (error) {
507
- initState.funnels.error = error;
508
- logger.error({ err: error }, 'Failed to initialize funnels AI');
509
- throw error;
510
- } finally {
511
- initState.funnels.initializing = false;
512
- }
513
- }
514
-
515
- /**
516
- * Pre-warm all AI instances (called at module load)
517
- */
518
- async function prewarmAllAI() {
519
- const startTime = Date.now();
520
-
521
- try {
522
- const results = await Promise.allSettled([
523
- initSchemaAI(),
524
- initHooksAI(),
525
- initRefineAI(),
526
- initFunnelsAI()
527
- ]);
528
-
529
- const elapsed = Date.now() - startTime;
530
-
531
- const failures = results.filter(r => r.status === 'rejected');
532
- if (failures.length > 0) {
533
- logger.warn({ failures: failures.length, elapsed }, `Pre-warming completed with ${failures.length} failures in ${elapsed}ms`);
534
- failures.forEach((f, i) => logger.error({ component: ['Schema', 'Hooks', 'Refine', 'Funnels'][i], err: f.reason }));
535
- } else {
536
- logger.info({ elapsed }, `AI transformers pre-warmed in ${elapsed}ms`);
537
- }
538
- } catch (error) {
539
- logger.error({ err: error }, 'Critical error during pre-warming');
540
- }
541
- }
542
-
543
- /**
544
- * Generate AI schema using cached transformer
545
- * @param {object} params - Parameters object
546
- * @param {string} params.prompt - User's prompt
547
- * @param {string} [params.user_id] - User ID for tracking
548
- * @param {string} [params.model] - Model to use
549
- * @returns {Promise<object>} Generated schema
550
- */
551
- export async function generateAISchema(params) {
552
- const { prompt, user_id, model } = params;
553
- if (!prompt) throw new Error("Please provide a prompt");
554
-
555
- const ai = await initSchemaAI();
556
-
557
- const selectedModel = SUPPORTED_MODELS.includes(model) ? model : DEFAULT_MODEL;
558
- ai.modelName = selectedModel;
559
-
560
- const fullPrompt = `${initState.schema.instructions}\n\n---\n\nUSER REQUEST:\n${prompt}`;
561
-
562
- const startTime = Date.now();
563
- logger.debug({ model: selectedModel }, 'Starting AI schema generation');
564
-
565
- let response = null;
566
- let success = false;
567
- let errorMessage = null;
568
-
569
- try {
570
- // @ts-ignore
571
- response = await ai.transformWithValidation(
572
- { prompt: fullPrompt },
573
- { maxRetries: 2 },
574
- validateSchemaOutput
575
- );
576
- success = true;
577
- } catch (error) {
578
- errorMessage = error.message;
579
- throw error;
580
- } finally {
581
- const duration_ms = Date.now() - startTime;
582
- const usage = ai.getLastUsage?.() || null;
583
-
584
- logAICallStructured({
585
- component: 'schema', prompt, response, duration_ms,
586
- success, error: errorMessage, model: selectedModel, usage
587
- });
588
-
589
- trackAIJob({
590
- component: 'schema', prompt, response, duration_ms,
591
- success, error: errorMessage, usage, user_id
592
- });
593
- }
594
-
595
- return response;
596
- }
597
-
598
- /**
599
- * Generate AI hooks using cached transformer
600
- * @param {object} params - Parameters object
601
- * @param {string} params.prompt - User's description of desired trends
602
- * @param {object} params.currentSchema - The existing dungeon schema
603
- * @param {string} [params.user_id] - User ID for tracking
604
- * @param {string} [params.model] - Model to use
605
- * @returns {Promise<string>} Generated hook function code
606
- */
607
- export async function generateAIHooks(params) {
608
- const { prompt, currentSchema, user_id, model } = params;
609
- if (!prompt) throw new Error("Please provide a prompt describing the trends you want");
610
- if (!currentSchema) throw new Error("Please provide the current schema");
611
-
612
- const ai = await initHooksAI();
613
-
614
- const selectedModel = SUPPORTED_MODELS.includes(model) ? model : DEFAULT_MODEL;
615
- ai.modelName = selectedModel;
616
-
617
- const schemaString = JSON.stringify(currentSchema, null, 2);
618
- const instructionsWithSchema = initState.hooks.instructions
619
- .replace(/<CURRENT_SCHEMA>/g, schemaString);
620
-
621
- const fullPrompt = `${instructionsWithSchema}\n\n---\n\nUSER REQUEST:\n${prompt}`;
622
-
623
- const startTime = Date.now();
624
- logger.debug({ model: selectedModel }, 'Starting AI hooks generation');
625
-
626
- let response = null;
627
- let hookCode = null;
628
- let success = false;
629
- let errorMessage = null;
630
-
631
- try {
632
- // @ts-ignore
633
- response = /** @type {string | {hook: string}} */ (await ai.message({ prompt: fullPrompt }));
634
-
635
- if (typeof response === 'string') {
636
- hookCode = response.trim();
637
- } else if (response && typeof response === 'object' && 'hook' in response) {
638
- hookCode = response.hook.trim();
639
- } else {
640
- throw new Error('AI did not return a valid hook function');
641
- }
642
-
643
- hookCode = hookCode
644
- .replace(/^```javascript\s*/g, '')
645
- .replace(/^```js\s*/g, '')
646
- .replace(/^```\s*/g, '')
647
- .replace(/```$/g, '')
648
- .trim();
649
-
650
- await validateHookOutput(hookCode);
651
-
652
- success = true;
653
- logger.info('Hook function validated successfully');
654
- } catch (error) {
655
- errorMessage = error.message;
656
- throw error;
657
- } finally {
658
- const duration_ms = Date.now() - startTime;
659
- const usage = ai.getLastUsage?.() || null;
660
-
661
- logAICallStructured({
662
- component: 'hooks', prompt, response: hookCode, duration_ms,
663
- success, error: errorMessage, model: selectedModel, usage
664
- });
665
-
666
- trackAIJob({
667
- component: 'hooks', prompt, response: hookCode, duration_ms,
668
- success, error: errorMessage, usage, user_id
669
- });
670
- }
671
-
672
- return hookCode;
673
- }
674
-
675
- /**
676
- * Refine existing schema using cached transformer
677
- * @param {object} params - Parameters object
678
- * @param {string} params.prompt - User's description of changes
679
- * @param {object} params.currentSchema - The existing dungeon schema to refine
680
- * @param {string} [params.user_id] - User ID for tracking
681
- * @param {string} [params.model] - Model to use
682
- * @returns {Promise<object>} Refined dungeon schema
683
- */
684
- export async function generateAIRefine(params) {
685
- const { prompt, currentSchema, user_id, model } = params;
686
- if (!prompt) throw new Error("Please provide a description of the changes you want");
687
- if (!currentSchema) throw new Error("Please provide the current schema to refine");
688
-
689
- const ai = await initRefineAI();
690
-
691
- const selectedModel = SUPPORTED_MODELS.includes(model) ? model : DEFAULT_MODEL;
692
- ai.modelName = selectedModel;
693
-
694
- const templates = await loadTemplates();
695
- const schemaString = JSON.stringify(currentSchema, null, 2);
696
- const instructionsWithSchema = initState.refine.instructions
697
- .replace(/<CURRENT_SCHEMA>/g, schemaString)
698
- .replace(/<TYPES>/g, templates.types);
699
-
700
- const fullPrompt = `${instructionsWithSchema}\n\n---\n\nUSER REQUEST:\n${prompt}`;
701
-
702
- const startTime = Date.now();
703
- logger.debug({ model: selectedModel }, 'Starting AI refine operation');
704
-
705
- let response = null;
706
- let success = false;
707
- let errorMessage = null;
708
-
709
- try {
710
- // @ts-ignore
711
- response = await ai.transformWithValidation(
712
- { prompt: fullPrompt },
713
- { maxRetries: 2 },
714
- validateSchemaOutput
715
- );
716
- success = true;
717
- } catch (error) {
718
- errorMessage = error.message;
719
- throw error;
720
- } finally {
721
- const duration_ms = Date.now() - startTime;
722
- const usage = ai.getLastUsage?.() || null;
723
-
724
- logAICallStructured({
725
- component: 'refine', prompt, response, duration_ms,
726
- success, error: errorMessage, model: selectedModel, usage
727
- });
728
-
729
- trackAIJob({
730
- component: 'refine', prompt, response, duration_ms,
731
- success, error: errorMessage, usage, user_id
732
- });
733
- }
734
-
735
- return response;
736
- }
737
-
738
- /**
739
- * Generate AI funnels using cached transformer
740
- * @param {object} params - Parameters object
741
- * @param {string} params.prompt - User's description of user journeys
742
- * @param {object} params.currentSchema - The existing dungeon schema
743
- * @param {string} [params.user_id] - User ID for tracking
744
- * @param {string} [params.model] - Model to use
745
- * @returns {Promise<object>} Generated funnels object
746
- */
747
- export async function generateAIFunnels(params) {
748
- const { prompt, currentSchema, user_id, model } = params;
749
- if (!prompt) throw new Error("Please provide a prompt describing user journeys");
750
- if (!currentSchema) throw new Error("Please provide the current schema");
751
-
752
- const ai = await initFunnelsAI();
753
-
754
- const selectedModel = SUPPORTED_MODELS.includes(model) ? model : DEFAULT_MODEL;
755
- ai.modelName = selectedModel;
756
-
757
- const schemaString = JSON.stringify(currentSchema, null, 2);
758
- const instructionsWithSchema = initState.funnels.instructions
759
- .replace(/<CURRENT_SCHEMA>/g, schemaString);
760
-
761
- const fullPrompt = `${instructionsWithSchema}\n\n---\n\nUSER REQUEST:\n${prompt}`;
762
-
763
- const startTime = Date.now();
764
- logger.debug({ model: selectedModel }, 'Starting AI funnels generation');
765
-
766
- const validateFunnels = createFunnelsValidator(currentSchema);
767
-
768
- let response = null;
769
- let success = false;
770
- let errorMessage = null;
771
-
772
- try {
773
- // @ts-ignore
774
- response = await ai.transformWithValidation(
775
- { prompt: fullPrompt },
776
- { maxRetries: 2 },
777
- validateFunnels
778
- );
779
- success = true;
780
- } catch (error) {
781
- errorMessage = error.message;
782
- throw error;
783
- } finally {
784
- const duration_ms = Date.now() - startTime;
785
- const usage = ai.getLastUsage?.() || null;
786
-
787
- logAICallStructured({
788
- component: 'funnels', prompt, response, duration_ms,
789
- success, error: errorMessage, model: selectedModel, usage
790
- });
791
-
792
- trackAIJob({
793
- component: 'funnels', prompt, response, duration_ms,
794
- success, error: errorMessage, usage, user_id
795
- });
796
- }
797
-
798
- return response;
799
- }
800
-
801
- /**
802
- * Get initialization status for monitoring
803
- * @returns {object} Current initialization state of all AI instances
804
- */
805
- export function getInitStatus() {
806
- return {
807
- schema: { ready: initState.schema.initialized, error: initState.schema.error?.message },
808
- hooks: { ready: initState.hooks.initialized, error: initState.hooks.error?.message },
809
- refine: { ready: initState.refine.initialized, error: initState.refine.error?.message },
810
- funnels: { ready: initState.funnels.initialized, error: initState.funnels.error?.message }
811
- };
812
- }
813
-
814
- // Start pre-warming immediately when module loads (container cold-start)
815
- if (NODE_ENV !== 'test') {
816
- prewarmAllAI().catch(error => {
817
- logger.error({ err: error }, 'Failed to pre-warm AI transformers');
818
- });
819
- }
820
-
821
- /**
822
- * Generic AI question
823
- * @param {string} question - The question to ask
824
- * @param {string} [user_id] - User ID for tracking
825
- * @returns {Promise<string>} The AI response
826
- */
827
- export async function ask(question, user_id = null) {
828
- const prompt = question;
829
- const ai = new AITransformer({
830
- vertexai: true,
831
- project: GOOGLE_CLOUD_PROJECT,
832
- maxOutputTokens: MAX_OUTPUT_TOKENS,
833
- onlyJSON: false,
834
- responseSchema: {
835
- type: "string"
836
- },
837
- modelName: "gemini-2.5-flash-lite",
838
- labels: {
839
- "app": "dm4",
840
- "component": "generic"
841
- }
842
- });
843
-
844
- const startTime = Date.now();
845
- await ai.init();
846
-
847
- let response = null;
848
- let success = false;
849
- let errorMessage = null;
850
-
851
- try {
852
- response = await ai.message({ prompt });
853
- success = true;
854
- } catch (error) {
855
- errorMessage = error.message;
856
- throw error;
857
- } finally {
858
- const duration_ms = Date.now() - startTime;
859
- const usage = ai.getLastUsage?.() || null;
860
-
861
- const logData = {
862
- component: 'ai-generic',
863
- prompt_preview: prompt.substring(0, 200),
864
- prompt_length: prompt.length,
865
- output_length: response?.toString()?.length || 0,
866
- duration_ms,
867
- duration_human: `${(duration_ms / 1000).toFixed(2)} seconds`,
868
- user_id: user_id || 'anonymous',
869
- success,
870
- error: errorMessage,
871
- ...(usage && {
872
- promptTokens: usage.promptTokens,
873
- responseTokens: usage.responseTokens,
874
- totalTokens: usage.totalTokens,
875
- attempts: usage.attempts,
876
- modelVersion: usage.modelVersion,
877
- requestedModel: usage.requestedModel
878
- }),
879
- };
880
-
881
- if (success) {
882
- logger.info(logData, `AI ask completed in ${duration_ms}ms`);
883
- } else {
884
- logger.error(logData, `AI ask failed after ${duration_ms}ms`);
885
- }
886
-
887
- trackAIJob({
888
- component: 'generic', prompt, response: response?.toString(),
889
- duration_ms, success, error: errorMessage, usage, user_id
890
- });
891
- }
892
-
893
- return response?.toString()?.trim();
894
- }
895
-
896
- export default generateAISchema;