@memberjunction/server 2.91.0 → 2.93.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +40 -2
- package/dist/generated/generated.d.ts +29 -6
- package/dist/generated/generated.d.ts.map +1 -1
- package/dist/generated/generated.js +185 -54
- package/dist/generated/generated.js.map +1 -1
- package/dist/resolvers/ReportResolver.d.ts.map +1 -1
- package/dist/resolvers/ReportResolver.js +4 -5
- package/dist/resolvers/ReportResolver.js.map +1 -1
- package/dist/resolvers/RunAIAgentResolver.d.ts +5 -1
- package/dist/resolvers/RunAIAgentResolver.d.ts.map +1 -1
- package/dist/resolvers/RunAIAgentResolver.js +26 -2
- package/dist/resolvers/RunAIAgentResolver.js.map +1 -1
- package/dist/resolvers/RunAIPromptResolver.d.ts +34 -0
- package/dist/resolvers/RunAIPromptResolver.d.ts.map +1 -1
- package/dist/resolvers/RunAIPromptResolver.js +386 -2
- package/dist/resolvers/RunAIPromptResolver.js.map +1 -1
- package/package.json +39 -39
- package/src/generated/generated.ts +120 -40
- package/src/resolvers/ReportResolver.ts +5 -6
- package/src/resolvers/RunAIAgentResolver.ts +80 -13
- package/src/resolvers/RunAIPromptResolver.ts +568 -26
package/src/resolvers/RunAIPromptResolver.ts:

@@ -1,10 +1,14 @@
-import { Resolver, Mutation, Arg, Ctx, ObjectType, Field, Int } from 'type-graphql';
+import { Resolver, Mutation, Query, Arg, Ctx, ObjectType, Field, Int } from 'type-graphql';
 import { UserPayload } from '../types.js';
 import { LogError, LogStatus, Metadata } from '@memberjunction/core';
-import { AIPromptEntityExtended } from '@memberjunction/core-entities';
+import { AIPromptEntityExtended, AIModelEntityExtended } from '@memberjunction/core-entities';
 import { AIPromptRunner } from '@memberjunction/ai-prompts';
 import { AIPromptParams } from '@memberjunction/ai-core-plus';
 import { ResolverBase } from '../generic/ResolverBase.js';
+import { RequireSystemUser } from '../directives/RequireSystemUser.js';
+import { AIEngine } from '@memberjunction/aiengine';
+import { ChatParams, ChatMessage, ChatMessageRole, GetAIAPIKey, BaseLLM } from '@memberjunction/ai';
+import { MJGlobal } from '@memberjunction/global';
 
 @ObjectType()
 export class AIPromptRunResult {
@@ -39,32 +43,72 @@ export class AIPromptRunResult {
     chatResult?: string;
 }
 
+@ObjectType()
+export class SimplePromptResult {
+    @Field()
+    success: boolean;
+
+    @Field({ nullable: true })
+    result?: string;
+
+    @Field({ nullable: true })
+    resultObject?: string; // JSON stringified object
+
+    @Field()
+    modelName: string;
+
+    @Field({ nullable: true })
+    error?: string;
+
+    @Field({ nullable: true })
+    executionTimeMs?: number;
+}
+
+@ObjectType()
+export class EmbedTextResult {
+    @Field(() => [[Number]])
+    embeddings: number[][];
+
+    @Field()
+    modelName: string;
+
+    @Field(() => Int)
+    vectorDimensions: number;
+
+    @Field({ nullable: true })
+    error?: string;
+}
+
 @Resolver()
 export class RunAIPromptResolver extends ResolverBase {
[-- 24 removed lines (old 44-67); their content was not captured in this rendering --]
+    /**
+     * Internal method that handles the core AI prompt execution logic.
+     * This method is called by both the regular and system user resolvers.
+     * @private
+     */
+    private async executeAIPrompt(
+        promptId: string,
+        userPayload: UserPayload,
+        data?: string,
+        overrideModelId?: string,
+        overrideVendorId?: string,
+        configurationId?: string,
+        skipValidation?: boolean,
+        templateData?: string,
+        responseFormat?: string,
+        temperature?: number,
+        topP?: number,
+        topK?: number,
+        minP?: number,
+        frequencyPenalty?: number,
+        presencePenalty?: number,
+        seed?: number,
+        stopSequences?: string[],
+        includeLogProbs?: boolean,
+        topLogProbs?: number,
+        messages?: string,
+        rerunFromPromptRunID?: string,
+        systemPromptOverride?: string
     ): Promise<AIPromptRunResult> {
         const startTime = Date.now();
 
@@ -232,4 +276,502 @@ export class RunAIPromptResolver extends ResolverBase {
             };
         }
     }
+
+    /**
+     * Public mutation for regular users to run AI prompts with authentication.
+     */
+    @Mutation(() => AIPromptRunResult)
+    async RunAIPrompt(
+        @Arg('promptId') promptId: string,
+        @Ctx() { userPayload }: { userPayload: UserPayload },
+        @Arg('data', { nullable: true }) data?: string,
+        @Arg('overrideModelId', { nullable: true }) overrideModelId?: string,
+        @Arg('overrideVendorId', { nullable: true }) overrideVendorId?: string,
+        @Arg('configurationId', { nullable: true }) configurationId?: string,
+        @Arg('skipValidation', { nullable: true }) skipValidation?: boolean,
+        @Arg('templateData', { nullable: true }) templateData?: string,
+        @Arg('responseFormat', { nullable: true }) responseFormat?: string,
+        @Arg('temperature', { nullable: true }) temperature?: number,
+        @Arg('topP', { nullable: true }) topP?: number,
+        @Arg('topK', () => Int, { nullable: true }) topK?: number,
+        @Arg('minP', { nullable: true }) minP?: number,
+        @Arg('frequencyPenalty', { nullable: true }) frequencyPenalty?: number,
+        @Arg('presencePenalty', { nullable: true }) presencePenalty?: number,
+        @Arg('seed', () => Int, { nullable: true }) seed?: number,
+        @Arg('stopSequences', () => [String], { nullable: true }) stopSequences?: string[],
+        @Arg('includeLogProbs', { nullable: true }) includeLogProbs?: boolean,
+        @Arg('topLogProbs', () => Int, { nullable: true }) topLogProbs?: number,
+        @Arg('messages', { nullable: true }) messages?: string,
+        @Arg('rerunFromPromptRunID', { nullable: true }) rerunFromPromptRunID?: string,
+        @Arg('systemPromptOverride', { nullable: true }) systemPromptOverride?: string
+    ): Promise<AIPromptRunResult> {
+        return this.executeAIPrompt(
+            promptId,
+            userPayload,
+            data,
+            overrideModelId,
+            overrideVendorId,
+            configurationId,
+            skipValidation,
+            templateData,
+            responseFormat,
+            temperature,
+            topP,
+            topK,
+            minP,
+            frequencyPenalty,
+            presencePenalty,
+            seed,
+            stopSequences,
+            includeLogProbs,
+            topLogProbs,
+            messages,
+            rerunFromPromptRunID,
+            systemPromptOverride
+        );
+    }
+
+    /**
+     * System user query for running AI prompts with elevated privileges.
+     * Requires the @RequireSystemUser decorator to ensure only system users can access.
+     */
+    @RequireSystemUser()
+    @Query(() => AIPromptRunResult)
+    async RunAIPromptSystemUser(
+        @Arg('promptId') promptId: string,
+        @Ctx() { userPayload }: { userPayload: UserPayload },
+        @Arg('data', { nullable: true }) data?: string,
+        @Arg('overrideModelId', { nullable: true }) overrideModelId?: string,
+        @Arg('overrideVendorId', { nullable: true }) overrideVendorId?: string,
+        @Arg('configurationId', { nullable: true }) configurationId?: string,
+        @Arg('skipValidation', { nullable: true }) skipValidation?: boolean,
+        @Arg('templateData', { nullable: true }) templateData?: string,
+        @Arg('responseFormat', { nullable: true }) responseFormat?: string,
+        @Arg('temperature', { nullable: true }) temperature?: number,
+        @Arg('topP', { nullable: true }) topP?: number,
+        @Arg('topK', () => Int, { nullable: true }) topK?: number,
+        @Arg('minP', { nullable: true }) minP?: number,
+        @Arg('frequencyPenalty', { nullable: true }) frequencyPenalty?: number,
+        @Arg('presencePenalty', { nullable: true }) presencePenalty?: number,
+        @Arg('seed', () => Int, { nullable: true }) seed?: number,
+        @Arg('stopSequences', () => [String], { nullable: true }) stopSequences?: string[],
+        @Arg('includeLogProbs', { nullable: true }) includeLogProbs?: boolean,
+        @Arg('topLogProbs', () => Int, { nullable: true }) topLogProbs?: number,
+        @Arg('messages', { nullable: true }) messages?: string,
+        @Arg('rerunFromPromptRunID', { nullable: true }) rerunFromPromptRunID?: string,
+        @Arg('systemPromptOverride', { nullable: true }) systemPromptOverride?: string
+    ): Promise<AIPromptRunResult> {
+        return this.executeAIPrompt(
+            promptId,
+            userPayload,
+            data,
+            overrideModelId,
+            overrideVendorId,
+            configurationId,
+            skipValidation,
+            templateData,
+            responseFormat,
+            temperature,
+            topP,
+            topK,
+            minP,
+            frequencyPenalty,
+            presencePenalty,
+            seed,
+            stopSequences,
+            includeLogProbs,
+            topLogProbs,
+            messages,
+            rerunFromPromptRunID,
+            systemPromptOverride
+        );
+    }
+
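(For context: the pattern above, a public @Mutation plus a @RequireSystemUser-guarded @Query delegating to shared logic, recurs for every new operation in this diff. A minimal TypeScript sketch of calling the new RunAIPrompt mutation over GraphQL; the endpoint URL and token are hypothetical, the operation and argument names come from the hunk above, and chatResult is the only AIPromptRunResult field visible in this section.)

    // Sketch only: invoke RunAIPrompt with plain fetch against a GraphQL endpoint.
    async function runPrompt(promptId: string): Promise<string | undefined> {
      const token = process.env.MJ_TOKEN ?? ''; // hypothetical auth token
      const query = `
        mutation Run($promptId: String!, $temperature: Float) {
          RunAIPrompt(promptId: $promptId, temperature: $temperature) {
            chatResult
          }
        }`;
      const res = await fetch('http://localhost:4000/graphql', { // hypothetical endpoint
        method: 'POST',
        headers: { 'content-type': 'application/json', authorization: `Bearer ${token}` },
        body: JSON.stringify({ query, variables: { promptId, temperature: 0.2 } }),
      });
      const { data } = await res.json();
      return data?.RunAIPrompt?.chatResult;
    }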
+    /**
+     * Helper method to select a model for simple prompt execution based on preferences or power level
+     * @private
+     */
+    private async selectModelForSimplePrompt(
+        preferredModels: string[] | undefined,
+        modelPower: string,
+        contextUser: any
+    ): Promise<AIModelEntityExtended> {
+        // Ensure AI Engine is configured
+        await AIEngine.Instance.Config(false, contextUser);
+
+        // Get all LLM models that have API keys
+        const allModels = AIEngine.Instance.Models.filter(m =>
+            m.AIModelType?.trim().toLowerCase() === 'llm' &&
+            m.IsActive === true
+        );
+
+        // Filter to only models with valid API keys
+        const modelsWithKeys: AIModelEntityExtended[] = [];
+        for (const model of allModels) {
+            const apiKey = GetAIAPIKey(model.DriverClass);
+            if (apiKey && apiKey.trim().length > 0) {
+                modelsWithKeys.push(model);
+            }
+        }
+
+        if (modelsWithKeys.length === 0) {
+            throw new Error('No AI models with valid API keys found');
+        }
+
+        // Try preferred models first if provided
+        if (preferredModels && preferredModels.length > 0) {
+            for (const preferred of preferredModels) {
+                const model = modelsWithKeys.find(m =>
+                    m.Name === preferred ||
+                    m.APIName === preferred
+                );
+                if (model) {
+                    LogStatus(`Selected preferred model: ${model.Name}`);
+                    return model;
+                }
+            }
+            LogStatus('No preferred models available, falling back to power selection');
+        }
+
+        // Sort by PowerRank for power-based selection
+        modelsWithKeys.sort((a, b) => (b.PowerRank || 0) - (a.PowerRank || 0));
+
+        let selectedModel: AIModelEntityExtended;
+        switch (modelPower) {
+            case 'lowest':
+                selectedModel = modelsWithKeys[modelsWithKeys.length - 1];
+                break;
+            case 'highest':
+                selectedModel = modelsWithKeys[0];
+                break;
+            case 'medium':
+            default:
+                const midIndex = Math.floor(modelsWithKeys.length / 2);
+                selectedModel = modelsWithKeys[midIndex];
+                break;
+        }
+
+        LogStatus(`Selected model by power (${modelPower || 'medium'}): ${selectedModel.Name}`);
+        return selectedModel;
+    }
+
+    /**
+     * Helper method to select an embedding model by size
+     * @private
+     */
+    private selectEmbeddingModelBySize(modelSize: string): AIModelEntityExtended {
+        const localModels = AIEngine.Instance.LocalEmbeddingModels;
+
+        if (!localModels || localModels.length === 0) {
+            throw new Error('No local embedding models available');
+        }
+
+        // Models are already sorted by PowerRank (highest first)
+        switch (modelSize) {
+            case 'small':
+                return localModels[localModels.length - 1]; // Lowest power
+            case 'medium':
+            default:
+                const midIndex = Math.floor(localModels.length / 2);
+                return localModels[midIndex] || localModels[0];
+        }
+    }
+
+    /**
+     * Helper method to build chat messages from system prompt and optional message history
+     * @private
+     */
+    private buildChatMessages(systemPrompt: string, messagesJson?: string): ChatMessage[] {
+        const messages: ChatMessage[] = [];
+
+        // Add system prompt
+        if (systemPrompt && systemPrompt.trim().length > 0) {
+            messages.push({
+                role: ChatMessageRole.system,
+                content: systemPrompt
+            });
+        }
+
+        // Add message history if provided
+        if (messagesJson) {
+            try {
+                const parsedMessages = JSON.parse(messagesJson);
+                if (Array.isArray(parsedMessages)) {
+                    for (const msg of parsedMessages) {
+                        if (msg.message && msg.role) {
+                            messages.push({
+                                role: msg.role === 'user' ? ChatMessageRole.user : ChatMessageRole.assistant,
+                                content: msg.message
+                            });
+                        }
+                    }
+                }
+                else if (messagesJson?.length > 0) {
+                    // messages maybe just has a simple string in it so add
+                    // as a single message
+                    messages.push({
+                        role: ChatMessageRole.user,
+                        content: messagesJson
+                    });
+                }
+            } catch (e) {
+                if (messagesJson?.length > 0) {
+                    // messages maybe just has a simple string in it so add
+                    // as a single message
+                    messages.push({
+                        role: ChatMessageRole.user,
+                        content: messagesJson
+                    });
+                }
+                LogError('Failed to parse messages JSON', undefined, e);
+            }
+        }
+
+        return messages;
+    }
+
+    /**
+     * Helper method to format simple prompt result
+     * @private
+     */
+    private formatSimpleResult(chatResult: any, model: AIModelEntityExtended, executionTime: number): SimplePromptResult {
+        if (!chatResult || !chatResult.success) {
+            return {
+                success: false,
+                error: chatResult?.errorMessage || 'Unknown error occurred',
+                modelName: model.Name,
+                executionTimeMs: executionTime
+            };
+        }
+
+        const resultContent = chatResult.data?.choices?.[0]?.message?.content || '';
+
+        // Try to extract JSON from the result
+        let resultObject: any = null;
+        try {
+            // First try to parse the entire result as JSON
+            resultObject = JSON.parse(resultContent);
+        } catch (e) {
+            // Try to find JSON within the text
+            const jsonMatch = resultContent.match(/\{[\s\S]*\}|\[[\s\S]*\]/);
+            if (jsonMatch) {
+                try {
+                    resultObject = JSON.parse(jsonMatch[0]);
+                } catch (e2) {
+                    // No valid JSON found
+                }
+            }
+        }
+
+        return {
+            success: true,
+            result: resultContent,
+            resultObject: resultObject ? JSON.stringify(resultObject) : undefined,
+            modelName: model.Name,
+            executionTimeMs: executionTime
+        };
+    }
+
+    /**
+     * Execute a simple prompt without requiring a stored AI Prompt entity.
+     * This is designed for interactive components that need quick AI responses.
+     */
+    @Mutation(() => SimplePromptResult)
+    async ExecuteSimplePrompt(
+        @Arg('systemPrompt') systemPrompt: string,
+        @Ctx() { userPayload }: { userPayload: UserPayload },
+        @Arg('messages', { nullable: true }) messages?: string,
+        @Arg('preferredModels', () => [String], { nullable: true }) preferredModels?: string[],
+        @Arg('modelPower', { nullable: true }) modelPower?: string,
+        @Arg('responseFormat', { nullable: true }) responseFormat?: string
+    ): Promise<SimplePromptResult> {
+        const startTime = Date.now();
+
+        try {
+            LogStatus(`=== EXECUTING SIMPLE PROMPT ===`);
+
+            // Get current user
+            const currentUser = this.GetUserFromPayload(userPayload);
+            if (!currentUser) {
+                return {
+                    success: false,
+                    error: 'Unable to determine current user',
+                    modelName: 'Unknown',
+                    executionTimeMs: Date.now() - startTime
+                };
+            }
+
+            // Select model based on preferences or power level
+            const model = await this.selectModelForSimplePrompt(
+                preferredModels,
+                modelPower || 'medium',
+                currentUser
+            );
+
+            // Build chat messages
+            const chatMessages = this.buildChatMessages(systemPrompt, messages);
+
+            if (chatMessages.length === 0) {
+                return {
+                    success: false,
+                    error: 'No messages to send to model',
+                    modelName: model.Name,
+                    executionTimeMs: Date.now() - startTime
+                };
+            }
+
+            // Create LLM instance
+            const apiKey = GetAIAPIKey(model.DriverClass);
+            const llm = MJGlobal.Instance.ClassFactory.CreateInstance<BaseLLM>(
+                BaseLLM,
+                model.DriverClass,
+                apiKey
+            );
+
+            if (!llm) {
+                return {
+                    success: false,
+                    error: `Failed to create LLM instance for model ${model.Name}`,
+                    modelName: model.Name,
+                    executionTimeMs: Date.now() - startTime
+                };
+            }
+
+            // Build chat parameters
+            const chatParams = new ChatParams();
+            chatParams.messages = chatMessages;
+            chatParams.model = model.APIName;
+
+            if (responseFormat) {
+                // Cast to valid response format type
+                chatParams.responseFormat = responseFormat as 'Any' | 'Text' | 'Markdown' | 'JSON' | 'ModelSpecific';
+            }
+
+            // Execute the chat completion
+            const result = await llm.ChatCompletion(chatParams);
+
+            const executionTime = Date.now() - startTime;
+            LogStatus(`=== SIMPLE PROMPT COMPLETED (${executionTime}ms) ===`);
+
+            // Format and return the result
+            return this.formatSimpleResult(result, model, executionTime);
+
+        } catch (error) {
+            const executionTime = Date.now() - startTime;
+            LogError('Simple prompt execution failed:', undefined, error);
+            return {
+                success: false,
+                error: (error as Error).message || 'Unknown error occurred',
+                modelName: 'Unknown',
+                executionTimeMs: executionTime
+            };
+        }
+    }
+
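(Note for API consumers: ExecuteSimplePrompt takes messages as a JSON string, and buildChatMessages() above accepts either an array of { role, message } objects or a bare string treated as a single user message. A hedged sketch of the operation and variables a caller would send; the model name is hypothetical, and modelPower maps to selectModelForSimplePrompt's 'lowest' | 'medium' | 'highest' cases.)

    // Sketch only: operation and variables for the new ExecuteSimplePrompt mutation.
    const executeSimplePromptQuery = `
      mutation Simple($systemPrompt: String!, $messages: String,
                      $preferredModels: [String!], $modelPower: String) {
        ExecuteSimplePrompt(systemPrompt: $systemPrompt, messages: $messages,
                            preferredModels: $preferredModels, modelPower: $modelPower) {
          success
          result
          resultObject
          modelName
          executionTimeMs
        }
      }`;
    const simplePromptVariables = {
      systemPrompt: 'You are a terse assistant.',
      // Array form parsed by buildChatMessages(): each entry needs role and message.
      messages: JSON.stringify([{ role: 'user', message: 'Summarize this diff in one line.' }]),
      preferredModels: ['Example Model A'], // hypothetical name; falls back to power selection
      modelPower: 'medium',
    };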
671
|
+
/**
|
|
672
|
+
* System user query for executing simple prompts with elevated privileges
|
|
673
|
+
*/
|
|
674
|
+
@RequireSystemUser()
|
|
675
|
+
@Query(() => SimplePromptResult)
|
|
676
|
+
async ExecuteSimplePromptSystemUser(
|
|
677
|
+
@Arg('systemPrompt') systemPrompt: string,
|
|
678
|
+
@Ctx() { userPayload }: { userPayload: UserPayload },
|
|
679
|
+
@Arg('messages', { nullable: true }) messages?: string,
|
|
680
|
+
@Arg('preferredModels', () => [String], { nullable: true }) preferredModels?: string[],
|
|
681
|
+
@Arg('modelPower', { nullable: true }) modelPower?: string,
|
|
682
|
+
@Arg('responseFormat', { nullable: true }) responseFormat?: string
|
|
683
|
+
): Promise<SimplePromptResult> {
|
|
684
|
+
// Reuse the same logic as the regular mutation
|
|
685
|
+
return this.ExecuteSimplePrompt(systemPrompt, { userPayload }, messages, preferredModels, modelPower, responseFormat);
|
|
686
|
+
}
|
|
687
|
+
|
|
688
|
+
/**
|
|
689
|
+
* Generate embeddings for text using local embedding models.
|
|
690
|
+
* Designed for interactive components that need fast similarity calculations.
|
|
691
|
+
*/
|
|
692
|
+
@Mutation(() => EmbedTextResult)
|
|
693
|
+
async EmbedText(
|
|
694
|
+
@Arg('textToEmbed', () => [String]) textToEmbed: string[],
|
|
695
|
+
@Arg('modelSize') modelSize: string,
|
|
696
|
+
@Ctx() { userPayload }: { userPayload: UserPayload }
|
|
697
|
+
): Promise<EmbedTextResult> {
|
|
698
|
+
try {
|
|
699
|
+
LogStatus(`=== GENERATING EMBEDDINGS for ${textToEmbed.length} text(s) ===`);
|
|
700
|
+
|
|
701
|
+
// Get current user
|
|
702
|
+
const currentUser = this.GetUserFromPayload(userPayload);
|
|
703
|
+
if (!currentUser) {
|
|
704
|
+
return {
|
|
705
|
+
embeddings: [],
|
|
706
|
+
modelName: 'Unknown',
|
|
707
|
+
vectorDimensions: 0,
|
|
708
|
+
error: 'Unable to determine current user'
|
|
709
|
+
};
|
|
710
|
+
}
|
|
711
|
+
|
|
712
|
+
// Ensure AI Engine is configured
|
|
713
|
+
await AIEngine.Instance.Config(false, currentUser);
|
|
714
|
+
|
|
715
|
+
// Select embedding model by size
|
|
716
|
+
const model = this.selectEmbeddingModelBySize(modelSize);
|
|
717
|
+
|
|
718
|
+
LogStatus(`Using embedding model: ${model.Name}`);
|
|
719
|
+
|
|
720
|
+
// Process embeddings
|
|
721
|
+
const embeddings: number[][] = [];
|
|
722
|
+
|
|
723
|
+
for (const text of textToEmbed) {
|
|
724
|
+
if (!text || text.trim().length === 0) {
|
|
725
|
+
// Return zero vector for empty text
|
|
726
|
+
embeddings.push([]);
|
|
727
|
+
continue;
|
|
728
|
+
}
|
|
729
|
+
|
|
730
|
+
// Use AIEngine's EmbedText method
|
|
731
|
+
const result = await AIEngine.Instance.EmbedText(model, text);
|
|
732
|
+
|
|
733
|
+
if (result && result.vector && result.vector.length > 0) {
|
|
734
|
+
embeddings.push(result.vector);
|
|
735
|
+
} else {
|
|
736
|
+
LogError(`Failed to generate embedding for text: ${text.substring(0, 50)}...`);
|
|
737
|
+
embeddings.push([]); // Add empty array for failed embeddings
|
|
738
|
+
}
|
|
739
|
+
}
|
|
740
|
+
|
|
741
|
+
// Get vector dimensions from first successful embedding
|
|
742
|
+
const vectorDimensions = embeddings.find(e => e.length > 0)?.length || 0;
|
|
743
|
+
|
|
744
|
+
LogStatus(`=== EMBEDDINGS GENERATED: ${embeddings.length} vectors of ${vectorDimensions} dimensions ===`);
|
|
745
|
+
|
|
746
|
+
return {
|
|
747
|
+
embeddings,
|
|
748
|
+
modelName: model.Name,
|
|
749
|
+
vectorDimensions,
|
|
750
|
+
error: undefined
|
|
751
|
+
};
|
|
752
|
+
|
|
753
|
+
} catch (error) {
|
|
754
|
+
LogError('Embedding generation failed:', undefined, error);
|
|
755
|
+
return {
|
|
756
|
+
embeddings: [],
|
|
757
|
+
modelName: 'Unknown',
|
|
758
|
+
vectorDimensions: 0,
|
|
759
|
+
error: (error as Error).message || 'Unknown error occurred'
|
|
760
|
+
};
|
|
761
|
+
}
|
|
762
|
+
}
|
|
763
|
+
|
|
764
|
+
/**
|
|
765
|
+
* System user query for generating embeddings with elevated privileges
|
|
766
|
+
*/
|
|
767
|
+
@RequireSystemUser()
|
|
768
|
+
@Query(() => EmbedTextResult)
|
|
769
|
+
async EmbedTextSystemUser(
|
|
770
|
+
@Arg('textToEmbed', () => [String]) textToEmbed: string[],
|
|
771
|
+
@Arg('modelSize') modelSize: string,
|
|
772
|
+
@Ctx() { userPayload }: { userPayload: UserPayload }
|
|
773
|
+
): Promise<EmbedTextResult> {
|
|
774
|
+
// Reuse the same logic as the regular mutation
|
|
775
|
+
return this.EmbedText(textToEmbed, modelSize, { userPayload });
|
|
776
|
+
}
|
|
235
777
|
}
|
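(EmbedText returns raw vectors, so the "fast similarity calculations" its doc comment mentions stay client-side. A sketch in the same spirit; the texts are placeholders, and per the resolver above an empty vector marks a failed or empty embedding.)

    // Sketch only: batch-embed two strings, then compare them client-side.
    const embedQuery = `
      mutation Embed($textToEmbed: [String!]!, $modelSize: String!) {
        EmbedText(textToEmbed: $textToEmbed, modelSize: $modelSize) {
          embeddings
          modelName
          vectorDimensions
          error
        }
      }`;
    const embedVariables = { textToEmbed: ['first text', 'second text'], modelSize: 'small' }; // 'small' | 'medium'

    // Cosine similarity over two returned vectors.
    function cosine(a: number[], b: number[]): number {
      let dot = 0, na = 0, nb = 0;
      for (let i = 0; i < a.length; i++) { dot += a[i] * b[i]; na += a[i] * a[i]; nb += b[i] * b[i]; }
      return dot / (Math.sqrt(na) * Math.sqrt(nb) || 1);
    }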