@memberjunction/server 2.111.1 → 2.112.0
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/dist/agents/skip-agent.d.ts +4 -4
- package/dist/agents/skip-agent.d.ts.map +1 -1
- package/dist/agents/skip-agent.js +808 -951
- package/dist/agents/skip-agent.js.map +1 -1
- package/dist/agents/skip-sdk.d.ts +1 -1
- package/dist/agents/skip-sdk.d.ts.map +1 -1
- package/dist/agents/skip-sdk.js +53 -43
- package/dist/agents/skip-sdk.js.map +1 -1
- package/dist/apolloServer/index.js +1 -1
- package/dist/auth/AuthProviderFactory.d.ts +1 -1
- package/dist/auth/AuthProviderFactory.d.ts.map +1 -1
- package/dist/auth/AuthProviderFactory.js +1 -3
- package/dist/auth/AuthProviderFactory.js.map +1 -1
- package/dist/auth/BaseAuthProvider.d.ts +1 -1
- package/dist/auth/BaseAuthProvider.d.ts.map +1 -1
- package/dist/auth/BaseAuthProvider.js +3 -2
- package/dist/auth/BaseAuthProvider.js.map +1 -1
- package/dist/auth/IAuthProvider.d.ts +1 -1
- package/dist/auth/IAuthProvider.d.ts.map +1 -1
- package/dist/auth/exampleNewUserSubClass.d.ts.map +1 -1
- package/dist/auth/exampleNewUserSubClass.js +1 -1
- package/dist/auth/exampleNewUserSubClass.js.map +1 -1
- package/dist/auth/index.d.ts +1 -1
- package/dist/auth/index.d.ts.map +1 -1
- package/dist/auth/index.js +6 -6
- package/dist/auth/index.js.map +1 -1
- package/dist/auth/initializeProviders.js +1 -1
- package/dist/auth/initializeProviders.js.map +1 -1
- package/dist/auth/newUsers.d.ts +1 -1
- package/dist/auth/newUsers.d.ts.map +1 -1
- package/dist/auth/newUsers.js +7 -7
- package/dist/auth/newUsers.js.map +1 -1
- package/dist/auth/providers/Auth0Provider.d.ts +1 -1
- package/dist/auth/providers/Auth0Provider.d.ts.map +1 -1
- package/dist/auth/providers/Auth0Provider.js +1 -1
- package/dist/auth/providers/Auth0Provider.js.map +1 -1
- package/dist/auth/providers/CognitoProvider.d.ts +1 -1
- package/dist/auth/providers/CognitoProvider.d.ts.map +1 -1
- package/dist/auth/providers/CognitoProvider.js +3 -6
- package/dist/auth/providers/CognitoProvider.js.map +1 -1
- package/dist/auth/providers/GoogleProvider.d.ts +1 -1
- package/dist/auth/providers/GoogleProvider.d.ts.map +1 -1
- package/dist/auth/providers/GoogleProvider.js +1 -1
- package/dist/auth/providers/GoogleProvider.js.map +1 -1
- package/dist/auth/providers/MSALProvider.d.ts +1 -1
- package/dist/auth/providers/MSALProvider.d.ts.map +1 -1
- package/dist/auth/providers/MSALProvider.js +1 -1
- package/dist/auth/providers/MSALProvider.js.map +1 -1
- package/dist/auth/providers/OktaProvider.d.ts +1 -1
- package/dist/auth/providers/OktaProvider.d.ts.map +1 -1
- package/dist/auth/providers/OktaProvider.js +1 -1
- package/dist/auth/providers/OktaProvider.js.map +1 -1
- package/dist/config.d.ts.map +1 -1
- package/dist/config.js +22 -10
- package/dist/config.js.map +1 -1
- package/dist/context.d.ts +1 -1
- package/dist/context.d.ts.map +1 -1
- package/dist/context.js +9 -7
- package/dist/context.js.map +1 -1
- package/dist/entitySubclasses/entityPermissions.server.d.ts +1 -1
- package/dist/entitySubclasses/entityPermissions.server.d.ts.map +1 -1
- package/dist/entitySubclasses/entityPermissions.server.js +1 -1
- package/dist/entitySubclasses/entityPermissions.server.js.map +1 -1
- package/dist/generated/generated.d.ts +648 -648
- package/dist/generated/generated.d.ts.map +1 -1
- package/dist/generated/generated.js +2986 -1133
- package/dist/generated/generated.js.map +1 -1
- package/dist/generic/KeyInputOutputTypes.d.ts +1 -1
- package/dist/generic/KeyInputOutputTypes.d.ts.map +1 -1
- package/dist/generic/KeyInputOutputTypes.js +1 -1
- package/dist/generic/KeyInputOutputTypes.js.map +1 -1
- package/dist/generic/ResolverBase.d.ts +1 -1
- package/dist/generic/ResolverBase.d.ts.map +1 -1
- package/dist/generic/ResolverBase.js +15 -10
- package/dist/generic/ResolverBase.js.map +1 -1
- package/dist/generic/RunViewResolver.d.ts +1 -1
- package/dist/generic/RunViewResolver.d.ts.map +1 -1
- package/dist/generic/RunViewResolver.js +15 -15
- package/dist/generic/RunViewResolver.js.map +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +18 -9
- package/dist/index.js.map +1 -1
- package/dist/resolvers/ActionResolver.d.ts +2 -2
- package/dist/resolvers/ActionResolver.d.ts.map +1 -1
- package/dist/resolvers/ActionResolver.js +28 -30
- package/dist/resolvers/ActionResolver.js.map +1 -1
- package/dist/resolvers/AskSkipResolver.d.ts +2 -2
- package/dist/resolvers/AskSkipResolver.d.ts.map +1 -1
- package/dist/resolvers/AskSkipResolver.js +60 -50
- package/dist/resolvers/AskSkipResolver.js.map +1 -1
- package/dist/resolvers/ComponentRegistryResolver.d.ts.map +1 -1
- package/dist/resolvers/ComponentRegistryResolver.js +36 -38
- package/dist/resolvers/ComponentRegistryResolver.js.map +1 -1
- package/dist/resolvers/CreateQueryResolver.d.ts +1 -1
- package/dist/resolvers/CreateQueryResolver.d.ts.map +1 -1
- package/dist/resolvers/CreateQueryResolver.js +43 -40
- package/dist/resolvers/CreateQueryResolver.js.map +1 -1
- package/dist/resolvers/DatasetResolver.d.ts.map +1 -1
- package/dist/resolvers/DatasetResolver.js +1 -1
- package/dist/resolvers/DatasetResolver.js.map +1 -1
- package/dist/resolvers/EntityRecordNameResolver.d.ts +1 -1
- package/dist/resolvers/EntityRecordNameResolver.d.ts.map +1 -1
- package/dist/resolvers/EntityRecordNameResolver.js +1 -1
- package/dist/resolvers/EntityRecordNameResolver.js.map +1 -1
- package/dist/resolvers/EntityResolver.d.ts.map +1 -1
- package/dist/resolvers/EntityResolver.js +1 -1
- package/dist/resolvers/EntityResolver.js.map +1 -1
- package/dist/resolvers/FileCategoryResolver.js +1 -1
- package/dist/resolvers/FileCategoryResolver.js.map +1 -1
- package/dist/resolvers/FileResolver.js +1 -1
- package/dist/resolvers/FileResolver.js.map +1 -1
- package/dist/resolvers/GetDataContextDataResolver.d.ts +1 -1
- package/dist/resolvers/GetDataContextDataResolver.d.ts.map +1 -1
- package/dist/resolvers/GetDataContextDataResolver.js +5 -5
- package/dist/resolvers/GetDataContextDataResolver.js.map +1 -1
- package/dist/resolvers/GetDataResolver.d.ts.map +1 -1
- package/dist/resolvers/GetDataResolver.js +8 -6
- package/dist/resolvers/GetDataResolver.js.map +1 -1
- package/dist/resolvers/MergeRecordsResolver.d.ts +3 -3
- package/dist/resolvers/MergeRecordsResolver.d.ts.map +1 -1
- package/dist/resolvers/MergeRecordsResolver.js +3 -3
- package/dist/resolvers/MergeRecordsResolver.js.map +1 -1
- package/dist/resolvers/PotentialDuplicateRecordResolver.d.ts +1 -1
- package/dist/resolvers/PotentialDuplicateRecordResolver.d.ts.map +1 -1
- package/dist/resolvers/PotentialDuplicateRecordResolver.js +1 -1
- package/dist/resolvers/PotentialDuplicateRecordResolver.js.map +1 -1
- package/dist/resolvers/QueryResolver.d.ts.map +1 -1
- package/dist/resolvers/QueryResolver.js +11 -11
- package/dist/resolvers/QueryResolver.js.map +1 -1
- package/dist/resolvers/ReportResolver.js +1 -1
- package/dist/resolvers/ReportResolver.js.map +1 -1
- package/dist/resolvers/RunAIAgentResolver.d.ts.map +1 -1
- package/dist/resolvers/RunAIAgentResolver.js +27 -28
- package/dist/resolvers/RunAIAgentResolver.js.map +1 -1
- package/dist/resolvers/RunAIPromptResolver.d.ts.map +1 -1
- package/dist/resolvers/RunAIPromptResolver.js +31 -31
- package/dist/resolvers/RunAIPromptResolver.js.map +1 -1
- package/dist/resolvers/RunTemplateResolver.d.ts.map +1 -1
- package/dist/resolvers/RunTemplateResolver.js +9 -9
- package/dist/resolvers/RunTemplateResolver.js.map +1 -1
- package/dist/resolvers/SqlLoggingConfigResolver.d.ts.map +1 -1
- package/dist/resolvers/SqlLoggingConfigResolver.js +10 -10
- package/dist/resolvers/SqlLoggingConfigResolver.js.map +1 -1
- package/dist/resolvers/SyncDataResolver.d.ts +1 -1
- package/dist/resolvers/SyncDataResolver.d.ts.map +1 -1
- package/dist/resolvers/SyncDataResolver.js +15 -14
- package/dist/resolvers/SyncDataResolver.js.map +1 -1
- package/dist/resolvers/SyncRolesUsersResolver.d.ts +1 -1
- package/dist/resolvers/SyncRolesUsersResolver.d.ts.map +1 -1
- package/dist/resolvers/SyncRolesUsersResolver.js +48 -44
- package/dist/resolvers/SyncRolesUsersResolver.js.map +1 -1
- package/dist/resolvers/TaskResolver.d.ts.map +1 -1
- package/dist/resolvers/TaskResolver.js +7 -7
- package/dist/resolvers/TaskResolver.js.map +1 -1
- package/dist/resolvers/TransactionGroupResolver.d.ts +1 -1
- package/dist/resolvers/TransactionGroupResolver.d.ts.map +1 -1
- package/dist/resolvers/TransactionGroupResolver.js +12 -12
- package/dist/resolvers/TransactionGroupResolver.js.map +1 -1
- package/dist/resolvers/UserFavoriteResolver.d.ts +1 -1
- package/dist/resolvers/UserFavoriteResolver.d.ts.map +1 -1
- package/dist/resolvers/UserFavoriteResolver.js +1 -1
- package/dist/resolvers/UserFavoriteResolver.js.map +1 -1
- package/dist/resolvers/UserViewResolver.d.ts.map +1 -1
- package/dist/resolvers/UserViewResolver.js.map +1 -1
- package/dist/rest/EntityCRUDHandler.d.ts +1 -1
- package/dist/rest/EntityCRUDHandler.d.ts.map +1 -1
- package/dist/rest/EntityCRUDHandler.js +14 -16
- package/dist/rest/EntityCRUDHandler.js.map +1 -1
- package/dist/rest/RESTEndpointHandler.d.ts.map +1 -1
- package/dist/rest/RESTEndpointHandler.js +23 -25
- package/dist/rest/RESTEndpointHandler.js.map +1 -1
- package/dist/rest/ViewOperationsHandler.d.ts +1 -1
- package/dist/rest/ViewOperationsHandler.d.ts.map +1 -1
- package/dist/rest/ViewOperationsHandler.js +17 -21
- package/dist/rest/ViewOperationsHandler.js.map +1 -1
- package/dist/scheduler/LearningCycleScheduler.d.ts.map +1 -1
- package/dist/scheduler/LearningCycleScheduler.js.map +1 -1
- package/dist/services/ScheduledJobsService.d.ts.map +1 -1
- package/dist/services/ScheduledJobsService.js +4 -6
- package/dist/services/ScheduledJobsService.js.map +1 -1
- package/dist/services/TaskOrchestrator.d.ts +1 -1
- package/dist/services/TaskOrchestrator.d.ts.map +1 -1
- package/dist/services/TaskOrchestrator.js +30 -30
- package/dist/services/TaskOrchestrator.js.map +1 -1
- package/dist/types.d.ts +3 -3
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js +0 -1
- package/dist/types.js.map +1 -1
- package/dist/util.d.ts +1 -1
- package/dist/util.d.ts.map +1 -1
- package/dist/util.js +2 -2
- package/dist/util.js.map +1 -1
- package/package.json +36 -37
- package/src/agents/skip-agent.ts +1067 -1200
- package/src/agents/skip-sdk.ts +877 -851
- package/src/apolloServer/index.ts +2 -2
- package/src/auth/AuthProviderFactory.ts +8 -14
- package/src/auth/BaseAuthProvider.ts +5 -4
- package/src/auth/IAuthProvider.ts +2 -2
- package/src/auth/exampleNewUserSubClass.ts +9 -2
- package/src/auth/index.ts +31 -26
- package/src/auth/initializeProviders.ts +3 -3
- package/src/auth/newUsers.ts +166 -134
- package/src/auth/providers/Auth0Provider.ts +5 -5
- package/src/auth/providers/CognitoProvider.ts +7 -10
- package/src/auth/providers/GoogleProvider.ts +4 -5
- package/src/auth/providers/MSALProvider.ts +5 -5
- package/src/auth/providers/OktaProvider.ts +6 -7
- package/src/config.ts +63 -54
- package/src/context.ts +42 -30
- package/src/entitySubclasses/entityPermissions.server.ts +3 -3
- package/src/generated/generated.ts +48130 -39930
- package/src/generic/KeyInputOutputTypes.ts +3 -6
- package/src/generic/ResolverBase.ts +119 -78
- package/src/generic/RunViewResolver.ts +27 -23
- package/src/index.ts +66 -42
- package/src/resolvers/ActionResolver.ts +46 -57
- package/src/resolvers/AskSkipResolver.ts +607 -533
- package/src/resolvers/ComponentRegistryResolver.ts +547 -562
- package/src/resolvers/CreateQueryResolver.ts +683 -655
- package/src/resolvers/DatasetResolver.ts +5 -6
- package/src/resolvers/EntityCommunicationsResolver.ts +1 -1
- package/src/resolvers/EntityRecordNameResolver.ts +9 -5
- package/src/resolvers/EntityResolver.ts +9 -7
- package/src/resolvers/FileCategoryResolver.ts +2 -2
- package/src/resolvers/FileResolver.ts +4 -4
- package/src/resolvers/GetDataContextDataResolver.ts +106 -118
- package/src/resolvers/GetDataResolver.ts +194 -205
- package/src/resolvers/MergeRecordsResolver.ts +5 -5
- package/src/resolvers/PotentialDuplicateRecordResolver.ts +1 -1
- package/src/resolvers/QueryResolver.ts +95 -78
- package/src/resolvers/ReportResolver.ts +2 -2
- package/src/resolvers/RunAIAgentResolver.ts +818 -828
- package/src/resolvers/RunAIPromptResolver.ts +693 -709
- package/src/resolvers/RunTemplateResolver.ts +105 -103
- package/src/resolvers/SqlLoggingConfigResolver.ts +69 -72
- package/src/resolvers/SyncDataResolver.ts +386 -352
- package/src/resolvers/SyncRolesUsersResolver.ts +387 -350
- package/src/resolvers/TaskResolver.ts +110 -115
- package/src/resolvers/TransactionGroupResolver.ts +143 -138
- package/src/resolvers/UserFavoriteResolver.ts +17 -8
- package/src/resolvers/UserViewResolver.ts +17 -12
- package/src/rest/EntityCRUDHandler.ts +291 -268
- package/src/rest/RESTEndpointHandler.ts +782 -776
- package/src/rest/ViewOperationsHandler.ts +191 -195
- package/src/scheduler/LearningCycleScheduler.ts +8 -52
- package/src/services/ScheduledJobsService.ts +129 -132
- package/src/services/TaskOrchestrator.ts +792 -776
- package/src/types.ts +15 -9
- package/src/util.ts +112 -109

package/src/resolvers/RunAIPromptResolver.ts

@@ -1,6 +1,6 @@
import { Resolver, Mutation, Query, Arg, Ctx, ObjectType, Field, Int } from 'type-graphql';
import { AppContext, UserPayload } from '../types.js';
-import { DatabaseProviderBase, LogError, LogStatus, Metadata } from '@memberjunction/
+import { DatabaseProviderBase, LogError, LogStatus, Metadata } from '@memberjunction/global';
import { AIPromptEntityExtended, AIModelEntityExtended } from '@memberjunction/core-entities';
import { AIPromptRunner } from '@memberjunction/ai-prompts';
import { AIPromptParams } from '@memberjunction/ai-core-plus';

@@ -13,769 +13,753 @@ import { GetReadWriteProvider } from '../util.js';

@ObjectType()
export class AIPromptRunResult {
  @Field()
  success: boolean;

  @Field({ nullable: true })
  output?: string;

  @Field({ nullable: true })
  parsedResult?: string;

  @Field({ nullable: true })
  error?: string;

  @Field({ nullable: true })
  executionTimeMs?: number;

  @Field({ nullable: true })
  tokensUsed?: number;

  @Field({ nullable: true })
  promptRunId?: string;

  @Field({ nullable: true })
  rawResult?: string;

  @Field({ nullable: true })
  validationResult?: string;

  @Field({ nullable: true })
  chatResult?: string;
}

@ObjectType()
export class SimplePromptResult {
  @Field()
  success: boolean;

  @Field({ nullable: true })
  result?: string;

  @Field({ nullable: true })
  resultObject?: string; // JSON stringified object

  @Field()
  modelName: string;

  @Field({ nullable: true })
  error?: string;

  @Field({ nullable: true })
  executionTimeMs?: number;
}

@ObjectType()
export class EmbedTextResult {
  @Field(() => [[Number]])
  embeddings: number[][];

  @Field()
  modelName: string;

  @Field(() => Int)
  vectorDimensions: number;

  @Field({ nullable: true })
  error?: string;
}

@Resolver()
export class RunAIPromptResolver extends ResolverBase {
  /**
   * Internal method that handles the core AI prompt execution logic.
   * This method is called by both the regular and system user resolvers.
   * @private
   */
  private async executeAIPrompt(
    p: DatabaseProviderBase,
    promptId: string,
    userPayload: UserPayload,
    data?: string,
    overrideModelId?: string,
    overrideVendorId?: string,
    configurationId?: string,
    skipValidation?: boolean,
    templateData?: string,
    responseFormat?: string,
    temperature?: number,
    topP?: number,
    topK?: number,
    minP?: number,
    frequencyPenalty?: number,
    presencePenalty?: number,
    seed?: number,
    stopSequences?: string[],
    includeLogProbs?: boolean,
    topLogProbs?: number,
    messages?: string,
    rerunFromPromptRunID?: string,
    systemPromptOverride?: string
  ): Promise<AIPromptRunResult> {
    const startTime = Date.now();

    try {
      LogStatus(`=== RUNNING AI PROMPT FOR ID: ${promptId} ===`);

      // Parse data contexts (JSON strings)
      let parsedData = {};
      let parsedTemplateData = {};

      if (data) {
        try {
          parsedData = JSON.parse(data);
        } catch (parseError) {
          return {
            success: false,
            error: `Invalid JSON in data: ${(parseError as Error).message}`,
            executionTimeMs: Date.now() - startTime,
          };
        }
      }

      if (templateData) {
        try {
          parsedTemplateData = JSON.parse(templateData);
        } catch (parseError) {
          return {
            success: false,
            error: `Invalid JSON in template data: ${(parseError as Error).message}`,
            executionTimeMs: Date.now() - startTime,
          };
        }
      }

      // Get current user from payload
      const currentUser = this.GetUserFromPayload(userPayload);
      if (!currentUser) {
        return {
          success: false,
          error: 'Unable to determine current user',
          executionTimeMs: Date.now() - startTime,
        };
      }

      // Load the AI prompt entity
      const promptEntity = await p.GetEntityObject<AIPromptEntityExtended>('AI Prompts', currentUser);
      await promptEntity.Load(promptId);

      if (!promptEntity.IsSaved) {
        return {
          success: false,
          error: `AI Prompt with ID ${promptId} not found`,
          executionTimeMs: Date.now() - startTime,
        };
      }

      // Check if prompt is active
      if (promptEntity.Status !== 'Active') {
        return {
          success: false,
          error: `AI Prompt "${promptEntity.Name}" is not active (Status: ${promptEntity.Status})`,
          executionTimeMs: Date.now() - startTime,
        };
      }

      // Create AI prompt runner and execute
      const promptRunner = new AIPromptRunner();

      // Build execution parameters
      const promptParams = new AIPromptParams();
      promptParams.prompt = promptEntity;
      promptParams.data = parsedData;
      promptParams.templateData = parsedTemplateData;
      promptParams.configurationId = configurationId;
      promptParams.contextUser = currentUser;
      promptParams.skipValidation = skipValidation || false;
      promptParams.rerunFromPromptRunID = rerunFromPromptRunID;
      promptParams.systemPromptOverride = systemPromptOverride;

      // Set override if model or vendor ID provided
      if (overrideModelId || overrideVendorId) {
        promptParams.override = {
          modelId: overrideModelId,
          vendorId: overrideVendorId,
        };
      }

      // Parse and set conversation messages if provided
      if (messages) {
        try {
          promptParams.conversationMessages = JSON.parse(messages);
        } catch (parseError) {
          // If parsing fails, treat as a simple user message
          promptParams.conversationMessages = [
            {
              role: 'user',
              content: messages,
            },
          ];
        }
      }

      // If responseFormat is provided, override the prompt's default response format
      if (responseFormat) {
        // We'll need to override the prompt's response format setting
        // This will be handled in the AIPromptRunner when it builds the ChatParams
        promptEntity.ResponseFormat = responseFormat as any;
      }

      // Build additional parameters for chat-specific settings
      const additionalParams: Record<string, any> = {};
      if (temperature != null) additionalParams.temperature = temperature;
      if (topP != null) additionalParams.topP = topP;
      if (topK != null) additionalParams.topK = topK;
      if (minP != null) additionalParams.minP = minP;
      if (frequencyPenalty != null) additionalParams.frequencyPenalty = frequencyPenalty;
      if (presencePenalty != null) additionalParams.presencePenalty = presencePenalty;
      if (seed != null) additionalParams.seed = seed;
      if (stopSequences != null) additionalParams.stopSequences = stopSequences;
      if (includeLogProbs != null) additionalParams.includeLogProbs = includeLogProbs;
      if (topLogProbs != null) additionalParams.topLogProbs = topLogProbs;

      // Only set additionalParameters if we have any
      if (Object.keys(additionalParams).length > 0) {
        promptParams.additionalParameters = additionalParams;
      }

      // Execute the prompt
      const result = await promptRunner.ExecutePrompt(promptParams);

      const executionTime = Date.now() - startTime;

      if (result.success) {
        LogStatus(`=== AI PROMPT RUN COMPLETED FOR: ${promptEntity.Name} (${executionTime}ms) ===`);

        return {
          success: true,
          output: result.rawResult,
          parsedResult: typeof result.result === 'string' ? result.result : JSON.stringify(result.result),
          rawResult: result.rawResult,
          executionTimeMs: executionTime,
          tokensUsed: result.tokensUsed,
          promptRunId: result.promptRun?.ID,
          validationResult: result.validationResult ? JSON.stringify(result.validationResult) : undefined,
          chatResult: result.chatResult ? JSON.stringify(result.chatResult) : undefined,
        };
      } else {
        LogError(`AI Prompt run failed for ${promptEntity.Name}: ${result.errorMessage}`);
        return {
          success: false,
          error: result.errorMessage,
          executionTimeMs: executionTime,
          promptRunId: result.promptRun?.ID,
          chatResult: result.chatResult ? JSON.stringify(result.chatResult) : undefined,
        };
      }
    } catch (error) {
      const executionTime = Date.now() - startTime;
      LogError(`AI Prompt run failed:`, undefined, error);
      return {
        success: false,
        error: (error as Error).message || 'Unknown error occurred',
        executionTimeMs: executionTime,
      };
    }
  }

  /**
   * Public mutation for regular users to run AI prompts with authentication.
   */
  @Mutation(() => AIPromptRunResult)
  async RunAIPrompt(
    @Arg('promptId') promptId: string,
    @Ctx() { userPayload, providers }: AppContext,
    @Arg('data', { nullable: true }) data?: string,
    @Arg('overrideModelId', { nullable: true }) overrideModelId?: string,
    @Arg('overrideVendorId', { nullable: true }) overrideVendorId?: string,
    @Arg('configurationId', { nullable: true }) configurationId?: string,
    @Arg('skipValidation', { nullable: true }) skipValidation?: boolean,
    @Arg('templateData', { nullable: true }) templateData?: string,
    @Arg('responseFormat', { nullable: true }) responseFormat?: string,
    @Arg('temperature', { nullable: true }) temperature?: number,
    @Arg('topP', { nullable: true }) topP?: number,
    @Arg('topK', () => Int, { nullable: true }) topK?: number,
    @Arg('minP', { nullable: true }) minP?: number,
    @Arg('frequencyPenalty', { nullable: true }) frequencyPenalty?: number,
    @Arg('presencePenalty', { nullable: true }) presencePenalty?: number,
    @Arg('seed', () => Int, { nullable: true }) seed?: number,
    @Arg('stopSequences', () => [String], { nullable: true }) stopSequences?: string[],
    @Arg('includeLogProbs', { nullable: true }) includeLogProbs?: boolean,
    @Arg('topLogProbs', () => Int, { nullable: true }) topLogProbs?: number,
    @Arg('messages', { nullable: true }) messages?: string,
    @Arg('rerunFromPromptRunID', { nullable: true }) rerunFromPromptRunID?: string,
    @Arg('systemPromptOverride', { nullable: true }) systemPromptOverride?: string
  ): Promise<AIPromptRunResult> {
    const p = GetReadWriteProvider(providers);
    return this.executeAIPrompt(
      p,
      promptId,
      userPayload,
      data,
      overrideModelId,
      overrideVendorId,
      configurationId,
      skipValidation,
      templateData,
      responseFormat,
      temperature,
      topP,
      topK,
      minP,
      frequencyPenalty,
      presencePenalty,
      seed,
      stopSequences,
      includeLogProbs,
      topLogProbs,
      messages,
      rerunFromPromptRunID,
      systemPromptOverride
    );
  }

  /**
   * System user query for running AI prompts with elevated privileges.
   * Requires the @RequireSystemUser decorator to ensure only system users can access.
   */
  @RequireSystemUser()
  @Query(() => AIPromptRunResult)
  async RunAIPromptSystemUser(
    @Arg('promptId') promptId: string,
    @Ctx() { userPayload, providers }: AppContext,
    @Arg('data', { nullable: true }) data?: string,
    @Arg('overrideModelId', { nullable: true }) overrideModelId?: string,
    @Arg('overrideVendorId', { nullable: true }) overrideVendorId?: string,
    @Arg('configurationId', { nullable: true }) configurationId?: string,
    @Arg('skipValidation', { nullable: true }) skipValidation?: boolean,
    @Arg('templateData', { nullable: true }) templateData?: string,
    @Arg('responseFormat', { nullable: true }) responseFormat?: string,
    @Arg('temperature', { nullable: true }) temperature?: number,
    @Arg('topP', { nullable: true }) topP?: number,
    @Arg('topK', () => Int, { nullable: true }) topK?: number,
    @Arg('minP', { nullable: true }) minP?: number,
    @Arg('frequencyPenalty', { nullable: true }) frequencyPenalty?: number,
    @Arg('presencePenalty', { nullable: true }) presencePenalty?: number,
    @Arg('seed', () => Int, { nullable: true }) seed?: number,
    @Arg('stopSequences', () => [String], { nullable: true }) stopSequences?: string[],
    @Arg('includeLogProbs', { nullable: true }) includeLogProbs?: boolean,
    @Arg('topLogProbs', () => Int, { nullable: true }) topLogProbs?: number,
    @Arg('messages', { nullable: true }) messages?: string,
    @Arg('rerunFromPromptRunID', { nullable: true }) rerunFromPromptRunID?: string,
    @Arg('systemPromptOverride', { nullable: true }) systemPromptOverride?: string
  ): Promise<AIPromptRunResult> {
    const p = GetReadWriteProvider(providers);
    return this.executeAIPrompt(
      p,
      promptId,
      userPayload,
      data,
      overrideModelId,
      overrideVendorId,
      configurationId,
      skipValidation,
      templateData,
      responseFormat,
      temperature,
      topP,
      topK,
      minP,
      frequencyPenalty,
      presencePenalty,
      seed,
      stopSequences,
      includeLogProbs,
      topLogProbs,
      messages,
      rerunFromPromptRunID,
      systemPromptOverride
    );
  }

  /**
   * Helper method to select a model for simple prompt execution based on preferences or power level
   * @private
   */
  private async selectModelForSimplePrompt(
    preferredModels: string[] | undefined,
    modelPower: string,
    contextUser: any
  ): Promise<AIModelEntityExtended> {
    // Ensure AI Engine is configured
    await AIEngine.Instance.Config(false, contextUser);

    // Get all LLM models that have API keys
    const allModels = AIEngine.Instance.Models.filter((m) => m.AIModelType?.trim().toLowerCase() === 'llm' && m.IsActive === true);

    // Filter to only models with valid API keys
    const modelsWithKeys: AIModelEntityExtended[] = [];
    for (const model of allModels) {
      const apiKey = GetAIAPIKey(model.DriverClass);
      if (apiKey && apiKey.trim().length > 0) {
        modelsWithKeys.push(model);
      }
    }

    if (modelsWithKeys.length === 0) {
      throw new Error('No AI models with valid API keys found');
    }

    // Try preferred models first if provided
    if (preferredModels && preferredModels.length > 0) {
      for (const preferred of preferredModels) {
        const model = modelsWithKeys.find((m) => m.Name === preferred || m.APIName === preferred);
        if (model) {
          LogStatus(`Selected preferred model: ${model.Name}`);
          return model;
        }
      }
      LogStatus('No preferred models available, falling back to power selection');
    }

    // Sort by PowerRank for power-based selection
    modelsWithKeys.sort((a, b) => (b.PowerRank || 0) - (a.PowerRank || 0));

    let selectedModel: AIModelEntityExtended;
    switch (modelPower) {
      case 'lowest':
        selectedModel = modelsWithKeys[modelsWithKeys.length - 1];
        break;
      case 'highest':
        selectedModel = modelsWithKeys[0];
        break;
      case 'medium':
      default:
        const midIndex = Math.floor(modelsWithKeys.length / 2);
        selectedModel = modelsWithKeys[midIndex];
        break;
    }

    LogStatus(`Selected model by power (${modelPower || 'medium'}): ${selectedModel.Name}`);
    return selectedModel;
  }

  /**
   * Helper method to select an embedding model by size
   * @private
   */
  private selectEmbeddingModelBySize(modelSize: string): AIModelEntityExtended {
    const localModels = AIEngine.Instance.LocalEmbeddingModels;

    if (!localModels || localModels.length === 0) {
      throw new Error('No local embedding models available');
    }

    // Models are already sorted by PowerRank (highest first)
    switch (modelSize) {
      case 'small':
        return localModels[localModels.length - 1]; // Lowest power
      case 'medium':
      default:
        const midIndex = Math.floor(localModels.length / 2);
        return localModels[midIndex] || localModels[0];
    }
  }

  /**
   * Helper method to build chat messages from system prompt and optional message history
   * @private
   */
  private buildChatMessages(systemPrompt: string, messagesJson?: string): ChatMessage[] {
    const messages: ChatMessage[] = [];

    // Add system prompt
    if (systemPrompt && systemPrompt.trim().length > 0) {
      messages.push({
        role: ChatMessageRole.system,
        content: systemPrompt,
      });
    }

    // Add message history if provided
    if (messagesJson) {
      try {
        const parsedMessages = JSON.parse(messagesJson);
        if (Array.isArray(parsedMessages)) {
          for (const msg of parsedMessages) {
            if (msg.message && msg.role) {
              messages.push({
                role: msg.role === 'user' ? ChatMessageRole.user : ChatMessageRole.assistant,
                content: msg.message,
              });
            }
          }
        } else if (messagesJson?.length > 0) {
          // messages maybe just has a simple string in it so add
          // as a single message
          messages.push({
            role: ChatMessageRole.user,
            content: messagesJson,
          });
        }
      } catch (e) {
        if (messagesJson?.length > 0) {
          // messages maybe just has a simple string in it so add
          // as a single message
          messages.push({
            role: ChatMessageRole.user,
            content: messagesJson,
          });
        }
        LogError('Failed to parse messages JSON', undefined, e);
      }
    }

    return messages;
  }

  /**
   * Helper method to format simple prompt result
   * @private
   */
  private formatSimpleResult(chatResult: any, model: AIModelEntityExtended, executionTime: number): SimplePromptResult {
    if (!chatResult || !chatResult.success) {
      return {
        success: false,
        error: chatResult?.errorMessage || 'Unknown error occurred',
        modelName: model.Name,
        executionTimeMs: executionTime,
      };
    }

    const resultContent = chatResult.data?.choices?.[0]?.message?.content || '';

    // Try to extract JSON from the result
    let resultObject: any = null;
    try {
      // First try to parse the entire result as JSON
      resultObject = JSON.parse(resultContent);
    } catch (e) {
      // Try to find JSON within the text
      const jsonMatch = resultContent.match(/\{[\s\S]*\}|\[[\s\S]*\]/);
      if (jsonMatch) {
        try {
          resultObject = JSON.parse(jsonMatch[0]);
        } catch (e2) {
          // No valid JSON found
        }
      }
    }

    return {
      success: true,
      result: resultContent,
      resultObject: resultObject ? JSON.stringify(resultObject) : undefined,
      modelName: model.Name,
      executionTimeMs: executionTime,
    };
  }

  /**
   * Execute a simple prompt without requiring a stored AI Prompt entity.
   * This is designed for interactive components that need quick AI responses.
   */
  @Mutation(() => SimplePromptResult)
  async ExecuteSimplePrompt(
    @Arg('systemPrompt') systemPrompt: string,
    @Ctx() { userPayload }: { userPayload: UserPayload },
    @Arg('messages', { nullable: true }) messages?: string,
    @Arg('preferredModels', () => [String], { nullable: true }) preferredModels?: string[],
    @Arg('modelPower', { nullable: true }) modelPower?: string,
    @Arg('responseFormat', { nullable: true }) responseFormat?: string
  ): Promise<SimplePromptResult> {
    const startTime = Date.now();

    try {
      LogStatus(`=== EXECUTING SIMPLE PROMPT ===`);

      // Get current user
      const currentUser = this.GetUserFromPayload(userPayload);
      if (!currentUser) {
        return {
          success: false,
          error: 'Unable to determine current user',
          modelName: 'Unknown',
          executionTimeMs: Date.now() - startTime,
        };
      }

      // Select model based on preferences or power level
      const model = await this.selectModelForSimplePrompt(preferredModels, modelPower || 'medium', currentUser);

      // Build chat messages
      const chatMessages = this.buildChatMessages(systemPrompt, messages);

      if (chatMessages.length === 0) {
        return {
          success: false,
          error: 'No messages to send to model',
          modelName: model.Name,
          executionTimeMs: Date.now() - startTime,
        };
      }

      // Create LLM instance
      const apiKey = GetAIAPIKey(model.DriverClass);
      const llm = MJGlobal.Instance.ClassFactory.CreateInstance<BaseLLM>(BaseLLM, model.DriverClass, apiKey);

      if (!llm) {
        return {
          success: false,
          error: `Failed to create LLM instance for model ${model.Name}`,
          modelName: model.Name,
          executionTimeMs: Date.now() - startTime,
        };
      }

      // Build chat parameters
      const chatParams = new ChatParams();
      chatParams.messages = chatMessages;
      chatParams.model = model.APIName;

      if (responseFormat) {
        // Cast to valid response format type
        chatParams.responseFormat = responseFormat as 'Any' | 'Text' | 'Markdown' | 'JSON' | 'ModelSpecific';
      }

      // Execute the chat completion
      const result = await llm.ChatCompletion(chatParams);

      const executionTime = Date.now() - startTime;
      LogStatus(`=== SIMPLE PROMPT COMPLETED (${executionTime}ms) ===`);

      // Format and return the result
      return this.formatSimpleResult(result, model, executionTime);
    } catch (error) {
      const executionTime = Date.now() - startTime;
      LogError('Simple prompt execution failed:', undefined, error);
      return {
        success: false,
        error: (error as Error).message || 'Unknown error occurred',
        modelName: 'Unknown',
        executionTimeMs: executionTime,
      };
    }
  }

  /**
   * System user query for executing simple prompts with elevated privileges
   */
  @RequireSystemUser()
  @Query(() => SimplePromptResult)
  async ExecuteSimplePromptSystemUser(
    @Arg('systemPrompt') systemPrompt: string,
    @Ctx() { userPayload }: { userPayload: UserPayload },
    @Arg('messages', { nullable: true }) messages?: string,
    @Arg('preferredModels', () => [String], { nullable: true }) preferredModels?: string[],
    @Arg('modelPower', { nullable: true }) modelPower?: string,
    @Arg('responseFormat', { nullable: true }) responseFormat?: string
  ): Promise<SimplePromptResult> {
    // Reuse the same logic as the regular mutation
    return this.ExecuteSimplePrompt(systemPrompt, { userPayload }, messages, preferredModels, modelPower, responseFormat);
  }

  /**
   * Generate embeddings for text using local embedding models.
   * Designed for interactive components that need fast similarity calculations.
   */
  @Mutation(() => EmbedTextResult)
  async EmbedText(
    @Arg('textToEmbed', () => [String]) textToEmbed: string[],
    @Arg('modelSize') modelSize: string,
    @Ctx() { userPayload }: { userPayload: UserPayload }
  ): Promise<EmbedTextResult> {
    try {
      LogStatus(`=== GENERATING EMBEDDINGS for ${textToEmbed.length} text(s) ===`);

      // Get current user
      const currentUser = this.GetUserFromPayload(userPayload);
      if (!currentUser) {
        return {
          embeddings: [],
          modelName: 'Unknown',
          vectorDimensions: 0,
          error: 'Unable to determine current user',
        };
      }

      // Ensure AI Engine is configured
      await AIEngine.Instance.Config(false, currentUser);

      // Select embedding model by size
      const model = this.selectEmbeddingModelBySize(modelSize);

      LogStatus(`Using embedding model: ${model.Name}`);

      // Process embeddings
      const embeddings: number[][] = [];

      for (const text of textToEmbed) {
        if (!text || text.trim().length === 0) {
          // Return zero vector for empty text
          embeddings.push([]);
          continue;
        }

        // Use AIEngine's EmbedText method
        const result = await AIEngine.Instance.EmbedText(model, text);

        if (result && result.vector && result.vector.length > 0) {
          embeddings.push(result.vector);
        } else {
          LogError(`Failed to generate embedding for text: ${text.substring(0, 50)}...`);
          embeddings.push([]); // Add empty array for failed embeddings
        }
      }

      // Get vector dimensions from first successful embedding
      const vectorDimensions = embeddings.find((e) => e.length > 0)?.length || 0;

      LogStatus(`=== EMBEDDINGS GENERATED: ${embeddings.length} vectors of ${vectorDimensions} dimensions ===`);

      return {
        embeddings,
        modelName: model.Name,
        vectorDimensions,
        error: undefined,
      };
    } catch (error) {
      LogError('Embedding generation failed:', undefined, error);
      return {
        embeddings: [],
        modelName: 'Unknown',
        vectorDimensions: 0,
        error: (error as Error).message || 'Unknown error occurred',
      };
    }
  }

  /**
   * System user query for generating embeddings with elevated privileges
   */
  @RequireSystemUser()
  @Query(() => EmbedTextResult)
  async EmbedTextSystemUser(
    @Arg('textToEmbed', () => [String]) textToEmbed: string[],
    @Arg('modelSize') modelSize: string,
    @Ctx() { userPayload }: { userPayload: UserPayload }
  ): Promise<EmbedTextResult> {
    // Reuse the same logic as the regular mutation
    return this.EmbedText(textToEmbed, modelSize, { userPayload });
  }
}