@memberjunction/server 2.111.1 → 2.112.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (250)
  1. package/dist/agents/skip-agent.d.ts +4 -4
  2. package/dist/agents/skip-agent.d.ts.map +1 -1
  3. package/dist/agents/skip-agent.js +808 -951
  4. package/dist/agents/skip-agent.js.map +1 -1
  5. package/dist/agents/skip-sdk.d.ts +1 -1
  6. package/dist/agents/skip-sdk.d.ts.map +1 -1
  7. package/dist/agents/skip-sdk.js +53 -43
  8. package/dist/agents/skip-sdk.js.map +1 -1
  9. package/dist/apolloServer/index.js +1 -1
  10. package/dist/auth/AuthProviderFactory.d.ts +1 -1
  11. package/dist/auth/AuthProviderFactory.d.ts.map +1 -1
  12. package/dist/auth/AuthProviderFactory.js +1 -3
  13. package/dist/auth/AuthProviderFactory.js.map +1 -1
  14. package/dist/auth/BaseAuthProvider.d.ts +1 -1
  15. package/dist/auth/BaseAuthProvider.d.ts.map +1 -1
  16. package/dist/auth/BaseAuthProvider.js +3 -2
  17. package/dist/auth/BaseAuthProvider.js.map +1 -1
  18. package/dist/auth/IAuthProvider.d.ts +1 -1
  19. package/dist/auth/IAuthProvider.d.ts.map +1 -1
  20. package/dist/auth/exampleNewUserSubClass.d.ts.map +1 -1
  21. package/dist/auth/exampleNewUserSubClass.js +1 -1
  22. package/dist/auth/exampleNewUserSubClass.js.map +1 -1
  23. package/dist/auth/index.d.ts +1 -1
  24. package/dist/auth/index.d.ts.map +1 -1
  25. package/dist/auth/index.js +6 -6
  26. package/dist/auth/index.js.map +1 -1
  27. package/dist/auth/initializeProviders.js +1 -1
  28. package/dist/auth/initializeProviders.js.map +1 -1
  29. package/dist/auth/newUsers.d.ts +1 -1
  30. package/dist/auth/newUsers.d.ts.map +1 -1
  31. package/dist/auth/newUsers.js +7 -7
  32. package/dist/auth/newUsers.js.map +1 -1
  33. package/dist/auth/providers/Auth0Provider.d.ts +1 -1
  34. package/dist/auth/providers/Auth0Provider.d.ts.map +1 -1
  35. package/dist/auth/providers/Auth0Provider.js +1 -1
  36. package/dist/auth/providers/Auth0Provider.js.map +1 -1
  37. package/dist/auth/providers/CognitoProvider.d.ts +1 -1
  38. package/dist/auth/providers/CognitoProvider.d.ts.map +1 -1
  39. package/dist/auth/providers/CognitoProvider.js +3 -6
  40. package/dist/auth/providers/CognitoProvider.js.map +1 -1
  41. package/dist/auth/providers/GoogleProvider.d.ts +1 -1
  42. package/dist/auth/providers/GoogleProvider.d.ts.map +1 -1
  43. package/dist/auth/providers/GoogleProvider.js +1 -1
  44. package/dist/auth/providers/GoogleProvider.js.map +1 -1
  45. package/dist/auth/providers/MSALProvider.d.ts +1 -1
  46. package/dist/auth/providers/MSALProvider.d.ts.map +1 -1
  47. package/dist/auth/providers/MSALProvider.js +1 -1
  48. package/dist/auth/providers/MSALProvider.js.map +1 -1
  49. package/dist/auth/providers/OktaProvider.d.ts +1 -1
  50. package/dist/auth/providers/OktaProvider.d.ts.map +1 -1
  51. package/dist/auth/providers/OktaProvider.js +1 -1
  52. package/dist/auth/providers/OktaProvider.js.map +1 -1
  53. package/dist/config.d.ts.map +1 -1
  54. package/dist/config.js +22 -10
  55. package/dist/config.js.map +1 -1
  56. package/dist/context.d.ts +1 -1
  57. package/dist/context.d.ts.map +1 -1
  58. package/dist/context.js +9 -7
  59. package/dist/context.js.map +1 -1
  60. package/dist/entitySubclasses/entityPermissions.server.d.ts +1 -1
  61. package/dist/entitySubclasses/entityPermissions.server.d.ts.map +1 -1
  62. package/dist/entitySubclasses/entityPermissions.server.js +1 -1
  63. package/dist/entitySubclasses/entityPermissions.server.js.map +1 -1
  64. package/dist/generated/generated.d.ts +648 -648
  65. package/dist/generated/generated.d.ts.map +1 -1
  66. package/dist/generated/generated.js +2986 -1133
  67. package/dist/generated/generated.js.map +1 -1
  68. package/dist/generic/KeyInputOutputTypes.d.ts +1 -1
  69. package/dist/generic/KeyInputOutputTypes.d.ts.map +1 -1
  70. package/dist/generic/KeyInputOutputTypes.js +1 -1
  71. package/dist/generic/KeyInputOutputTypes.js.map +1 -1
  72. package/dist/generic/ResolverBase.d.ts +1 -1
  73. package/dist/generic/ResolverBase.d.ts.map +1 -1
  74. package/dist/generic/ResolverBase.js +15 -10
  75. package/dist/generic/ResolverBase.js.map +1 -1
  76. package/dist/generic/RunViewResolver.d.ts +1 -1
  77. package/dist/generic/RunViewResolver.d.ts.map +1 -1
  78. package/dist/generic/RunViewResolver.js +15 -15
  79. package/dist/generic/RunViewResolver.js.map +1 -1
  80. package/dist/index.d.ts.map +1 -1
  81. package/dist/index.js +18 -9
  82. package/dist/index.js.map +1 -1
  83. package/dist/resolvers/ActionResolver.d.ts +2 -2
  84. package/dist/resolvers/ActionResolver.d.ts.map +1 -1
  85. package/dist/resolvers/ActionResolver.js +28 -30
  86. package/dist/resolvers/ActionResolver.js.map +1 -1
  87. package/dist/resolvers/AskSkipResolver.d.ts +2 -2
  88. package/dist/resolvers/AskSkipResolver.d.ts.map +1 -1
  89. package/dist/resolvers/AskSkipResolver.js +60 -50
  90. package/dist/resolvers/AskSkipResolver.js.map +1 -1
  91. package/dist/resolvers/ComponentRegistryResolver.d.ts.map +1 -1
  92. package/dist/resolvers/ComponentRegistryResolver.js +36 -38
  93. package/dist/resolvers/ComponentRegistryResolver.js.map +1 -1
  94. package/dist/resolvers/CreateQueryResolver.d.ts +1 -1
  95. package/dist/resolvers/CreateQueryResolver.d.ts.map +1 -1
  96. package/dist/resolvers/CreateQueryResolver.js +43 -40
  97. package/dist/resolvers/CreateQueryResolver.js.map +1 -1
  98. package/dist/resolvers/DatasetResolver.d.ts.map +1 -1
  99. package/dist/resolvers/DatasetResolver.js +1 -1
  100. package/dist/resolvers/DatasetResolver.js.map +1 -1
  101. package/dist/resolvers/EntityRecordNameResolver.d.ts +1 -1
  102. package/dist/resolvers/EntityRecordNameResolver.d.ts.map +1 -1
  103. package/dist/resolvers/EntityRecordNameResolver.js +1 -1
  104. package/dist/resolvers/EntityRecordNameResolver.js.map +1 -1
  105. package/dist/resolvers/EntityResolver.d.ts.map +1 -1
  106. package/dist/resolvers/EntityResolver.js +1 -1
  107. package/dist/resolvers/EntityResolver.js.map +1 -1
  108. package/dist/resolvers/FileCategoryResolver.js +1 -1
  109. package/dist/resolvers/FileCategoryResolver.js.map +1 -1
  110. package/dist/resolvers/FileResolver.js +1 -1
  111. package/dist/resolvers/FileResolver.js.map +1 -1
  112. package/dist/resolvers/GetDataContextDataResolver.d.ts +1 -1
  113. package/dist/resolvers/GetDataContextDataResolver.d.ts.map +1 -1
  114. package/dist/resolvers/GetDataContextDataResolver.js +5 -5
  115. package/dist/resolvers/GetDataContextDataResolver.js.map +1 -1
  116. package/dist/resolvers/GetDataResolver.d.ts.map +1 -1
  117. package/dist/resolvers/GetDataResolver.js +8 -6
  118. package/dist/resolvers/GetDataResolver.js.map +1 -1
  119. package/dist/resolvers/MergeRecordsResolver.d.ts +3 -3
  120. package/dist/resolvers/MergeRecordsResolver.d.ts.map +1 -1
  121. package/dist/resolvers/MergeRecordsResolver.js +3 -3
  122. package/dist/resolvers/MergeRecordsResolver.js.map +1 -1
  123. package/dist/resolvers/PotentialDuplicateRecordResolver.d.ts +1 -1
  124. package/dist/resolvers/PotentialDuplicateRecordResolver.d.ts.map +1 -1
  125. package/dist/resolvers/PotentialDuplicateRecordResolver.js +1 -1
  126. package/dist/resolvers/PotentialDuplicateRecordResolver.js.map +1 -1
  127. package/dist/resolvers/QueryResolver.d.ts.map +1 -1
  128. package/dist/resolvers/QueryResolver.js +11 -11
  129. package/dist/resolvers/QueryResolver.js.map +1 -1
  130. package/dist/resolvers/ReportResolver.js +1 -1
  131. package/dist/resolvers/ReportResolver.js.map +1 -1
  132. package/dist/resolvers/RunAIAgentResolver.d.ts.map +1 -1
  133. package/dist/resolvers/RunAIAgentResolver.js +27 -28
  134. package/dist/resolvers/RunAIAgentResolver.js.map +1 -1
  135. package/dist/resolvers/RunAIPromptResolver.d.ts.map +1 -1
  136. package/dist/resolvers/RunAIPromptResolver.js +31 -31
  137. package/dist/resolvers/RunAIPromptResolver.js.map +1 -1
  138. package/dist/resolvers/RunTemplateResolver.d.ts.map +1 -1
  139. package/dist/resolvers/RunTemplateResolver.js +9 -9
  140. package/dist/resolvers/RunTemplateResolver.js.map +1 -1
  141. package/dist/resolvers/SqlLoggingConfigResolver.d.ts.map +1 -1
  142. package/dist/resolvers/SqlLoggingConfigResolver.js +10 -10
  143. package/dist/resolvers/SqlLoggingConfigResolver.js.map +1 -1
  144. package/dist/resolvers/SyncDataResolver.d.ts +1 -1
  145. package/dist/resolvers/SyncDataResolver.d.ts.map +1 -1
  146. package/dist/resolvers/SyncDataResolver.js +15 -14
  147. package/dist/resolvers/SyncDataResolver.js.map +1 -1
  148. package/dist/resolvers/SyncRolesUsersResolver.d.ts +1 -1
  149. package/dist/resolvers/SyncRolesUsersResolver.d.ts.map +1 -1
  150. package/dist/resolvers/SyncRolesUsersResolver.js +48 -44
  151. package/dist/resolvers/SyncRolesUsersResolver.js.map +1 -1
  152. package/dist/resolvers/TaskResolver.d.ts.map +1 -1
  153. package/dist/resolvers/TaskResolver.js +7 -7
  154. package/dist/resolvers/TaskResolver.js.map +1 -1
  155. package/dist/resolvers/TransactionGroupResolver.d.ts +1 -1
  156. package/dist/resolvers/TransactionGroupResolver.d.ts.map +1 -1
  157. package/dist/resolvers/TransactionGroupResolver.js +12 -12
  158. package/dist/resolvers/TransactionGroupResolver.js.map +1 -1
  159. package/dist/resolvers/UserFavoriteResolver.d.ts +1 -1
  160. package/dist/resolvers/UserFavoriteResolver.d.ts.map +1 -1
  161. package/dist/resolvers/UserFavoriteResolver.js +1 -1
  162. package/dist/resolvers/UserFavoriteResolver.js.map +1 -1
  163. package/dist/resolvers/UserViewResolver.d.ts.map +1 -1
  164. package/dist/resolvers/UserViewResolver.js.map +1 -1
  165. package/dist/rest/EntityCRUDHandler.d.ts +1 -1
  166. package/dist/rest/EntityCRUDHandler.d.ts.map +1 -1
  167. package/dist/rest/EntityCRUDHandler.js +14 -16
  168. package/dist/rest/EntityCRUDHandler.js.map +1 -1
  169. package/dist/rest/RESTEndpointHandler.d.ts.map +1 -1
  170. package/dist/rest/RESTEndpointHandler.js +23 -25
  171. package/dist/rest/RESTEndpointHandler.js.map +1 -1
  172. package/dist/rest/ViewOperationsHandler.d.ts +1 -1
  173. package/dist/rest/ViewOperationsHandler.d.ts.map +1 -1
  174. package/dist/rest/ViewOperationsHandler.js +17 -21
  175. package/dist/rest/ViewOperationsHandler.js.map +1 -1
  176. package/dist/scheduler/LearningCycleScheduler.d.ts.map +1 -1
  177. package/dist/scheduler/LearningCycleScheduler.js.map +1 -1
  178. package/dist/services/ScheduledJobsService.d.ts.map +1 -1
  179. package/dist/services/ScheduledJobsService.js +4 -6
  180. package/dist/services/ScheduledJobsService.js.map +1 -1
  181. package/dist/services/TaskOrchestrator.d.ts +1 -1
  182. package/dist/services/TaskOrchestrator.d.ts.map +1 -1
  183. package/dist/services/TaskOrchestrator.js +30 -30
  184. package/dist/services/TaskOrchestrator.js.map +1 -1
  185. package/dist/types.d.ts +3 -3
  186. package/dist/types.d.ts.map +1 -1
  187. package/dist/types.js +0 -1
  188. package/dist/types.js.map +1 -1
  189. package/dist/util.d.ts +1 -1
  190. package/dist/util.d.ts.map +1 -1
  191. package/dist/util.js +2 -2
  192. package/dist/util.js.map +1 -1
  193. package/package.json +36 -37
  194. package/src/agents/skip-agent.ts +1067 -1200
  195. package/src/agents/skip-sdk.ts +877 -851
  196. package/src/apolloServer/index.ts +2 -2
  197. package/src/auth/AuthProviderFactory.ts +8 -14
  198. package/src/auth/BaseAuthProvider.ts +5 -4
  199. package/src/auth/IAuthProvider.ts +2 -2
  200. package/src/auth/exampleNewUserSubClass.ts +9 -2
  201. package/src/auth/index.ts +31 -26
  202. package/src/auth/initializeProviders.ts +3 -3
  203. package/src/auth/newUsers.ts +166 -134
  204. package/src/auth/providers/Auth0Provider.ts +5 -5
  205. package/src/auth/providers/CognitoProvider.ts +7 -10
  206. package/src/auth/providers/GoogleProvider.ts +4 -5
  207. package/src/auth/providers/MSALProvider.ts +5 -5
  208. package/src/auth/providers/OktaProvider.ts +6 -7
  209. package/src/config.ts +63 -54
  210. package/src/context.ts +42 -30
  211. package/src/entitySubclasses/entityPermissions.server.ts +3 -3
  212. package/src/generated/generated.ts +48130 -39930
  213. package/src/generic/KeyInputOutputTypes.ts +3 -6
  214. package/src/generic/ResolverBase.ts +119 -78
  215. package/src/generic/RunViewResolver.ts +27 -23
  216. package/src/index.ts +66 -42
  217. package/src/resolvers/ActionResolver.ts +46 -57
  218. package/src/resolvers/AskSkipResolver.ts +607 -533
  219. package/src/resolvers/ComponentRegistryResolver.ts +547 -562
  220. package/src/resolvers/CreateQueryResolver.ts +683 -655
  221. package/src/resolvers/DatasetResolver.ts +5 -6
  222. package/src/resolvers/EntityCommunicationsResolver.ts +1 -1
  223. package/src/resolvers/EntityRecordNameResolver.ts +9 -5
  224. package/src/resolvers/EntityResolver.ts +9 -7
  225. package/src/resolvers/FileCategoryResolver.ts +2 -2
  226. package/src/resolvers/FileResolver.ts +4 -4
  227. package/src/resolvers/GetDataContextDataResolver.ts +106 -118
  228. package/src/resolvers/GetDataResolver.ts +194 -205
  229. package/src/resolvers/MergeRecordsResolver.ts +5 -5
  230. package/src/resolvers/PotentialDuplicateRecordResolver.ts +1 -1
  231. package/src/resolvers/QueryResolver.ts +95 -78
  232. package/src/resolvers/ReportResolver.ts +2 -2
  233. package/src/resolvers/RunAIAgentResolver.ts +818 -828
  234. package/src/resolvers/RunAIPromptResolver.ts +693 -709
  235. package/src/resolvers/RunTemplateResolver.ts +105 -103
  236. package/src/resolvers/SqlLoggingConfigResolver.ts +69 -72
  237. package/src/resolvers/SyncDataResolver.ts +386 -352
  238. package/src/resolvers/SyncRolesUsersResolver.ts +387 -350
  239. package/src/resolvers/TaskResolver.ts +110 -115
  240. package/src/resolvers/TransactionGroupResolver.ts +143 -138
  241. package/src/resolvers/UserFavoriteResolver.ts +17 -8
  242. package/src/resolvers/UserViewResolver.ts +17 -12
  243. package/src/rest/EntityCRUDHandler.ts +291 -268
  244. package/src/rest/RESTEndpointHandler.ts +782 -776
  245. package/src/rest/ViewOperationsHandler.ts +191 -195
  246. package/src/scheduler/LearningCycleScheduler.ts +8 -52
  247. package/src/services/ScheduledJobsService.ts +129 -132
  248. package/src/services/TaskOrchestrator.ts +792 -776
  249. package/src/types.ts +15 -9
  250. package/src/util.ts +112 -109
package/src/resolvers/RunAIPromptResolver.ts

@@ -1,6 +1,6 @@
  import { Resolver, Mutation, Query, Arg, Ctx, ObjectType, Field, Int } from 'type-graphql';
  import { AppContext, UserPayload } from '../types.js';
- import { DatabaseProviderBase, LogError, LogStatus, Metadata } from '@memberjunction/core';
+ import { DatabaseProviderBase, LogError, LogStatus, Metadata } from '@memberjunction/global';
  import { AIPromptEntityExtended, AIModelEntityExtended } from '@memberjunction/core-entities';
  import { AIPromptRunner } from '@memberjunction/ai-prompts';
  import { AIPromptParams } from '@memberjunction/ai-core-plus';
@@ -13,769 +13,753 @@ import { GetReadWriteProvider } from '../util.js';

  @ObjectType()
  export class AIPromptRunResult {
- @Field()
- success: boolean;
+ @Field()
+ success: boolean;

- @Field({ nullable: true })
- output?: string;
+ @Field({ nullable: true })
+ output?: string;

- @Field({ nullable: true })
- parsedResult?: string;
+ @Field({ nullable: true })
+ parsedResult?: string;

- @Field({ nullable: true })
- error?: string;
+ @Field({ nullable: true })
+ error?: string;

- @Field({ nullable: true })
- executionTimeMs?: number;
+ @Field({ nullable: true })
+ executionTimeMs?: number;

- @Field({ nullable: true })
- tokensUsed?: number;
+ @Field({ nullable: true })
+ tokensUsed?: number;

- @Field({ nullable: true })
- promptRunId?: string;
+ @Field({ nullable: true })
+ promptRunId?: string;

- @Field({ nullable: true })
- rawResult?: string;
+ @Field({ nullable: true })
+ rawResult?: string;

- @Field({ nullable: true })
- validationResult?: string;
+ @Field({ nullable: true })
+ validationResult?: string;

- @Field({ nullable: true })
- chatResult?: string;
+ @Field({ nullable: true })
+ chatResult?: string;
  }

  @ObjectType()
  export class SimplePromptResult {
- @Field()
- success: boolean;
+ @Field()
+ success: boolean;

- @Field({ nullable: true })
- result?: string;
+ @Field({ nullable: true })
+ result?: string;

- @Field({ nullable: true })
- resultObject?: string; // JSON stringified object
+ @Field({ nullable: true })
+ resultObject?: string; // JSON stringified object

- @Field()
- modelName: string;
+ @Field()
+ modelName: string;

- @Field({ nullable: true })
- error?: string;
+ @Field({ nullable: true })
+ error?: string;

- @Field({ nullable: true })
- executionTimeMs?: number;
+ @Field({ nullable: true })
+ executionTimeMs?: number;
  }

  @ObjectType()
  export class EmbedTextResult {
- @Field(() => [[Number]])
- embeddings: number[][];
+ @Field(() => [[Number]])
+ embeddings: number[][];

- @Field()
- modelName: string;
+ @Field()
+ modelName: string;

- @Field(() => Int)
- vectorDimensions: number;
+ @Field(() => Int)
+ vectorDimensions: number;

- @Field({ nullable: true })
- error?: string;
+ @Field({ nullable: true })
+ error?: string;
  }

  @Resolver()
  export class RunAIPromptResolver extends ResolverBase {
- /**
- * Internal method that handles the core AI prompt execution logic.
- * This method is called by both the regular and system user resolvers.
- * @private
- */
- private async executeAIPrompt(
- p: DatabaseProviderBase,
- promptId: string,
- userPayload: UserPayload,
- data?: string,
- overrideModelId?: string,
- overrideVendorId?: string,
- configurationId?: string,
- skipValidation?: boolean,
- templateData?: string,
- responseFormat?: string,
- temperature?: number,
- topP?: number,
- topK?: number,
- minP?: number,
- frequencyPenalty?: number,
- presencePenalty?: number,
- seed?: number,
- stopSequences?: string[],
- includeLogProbs?: boolean,
- topLogProbs?: number,
- messages?: string,
- rerunFromPromptRunID?: string,
- systemPromptOverride?: string
- ): Promise<AIPromptRunResult> {
- const startTime = Date.now();
-
+ /**
+ * Internal method that handles the core AI prompt execution logic.
+ * This method is called by both the regular and system user resolvers.
+ * @private
+ */
+ private async executeAIPrompt(
+ p: DatabaseProviderBase,
+ promptId: string,
+ userPayload: UserPayload,
+ data?: string,
+ overrideModelId?: string,
+ overrideVendorId?: string,
+ configurationId?: string,
+ skipValidation?: boolean,
+ templateData?: string,
+ responseFormat?: string,
+ temperature?: number,
+ topP?: number,
+ topK?: number,
+ minP?: number,
+ frequencyPenalty?: number,
+ presencePenalty?: number,
+ seed?: number,
+ stopSequences?: string[],
+ includeLogProbs?: boolean,
+ topLogProbs?: number,
+ messages?: string,
+ rerunFromPromptRunID?: string,
+ systemPromptOverride?: string
+ ): Promise<AIPromptRunResult> {
+ const startTime = Date.now();
+
+ try {
+ LogStatus(`=== RUNNING AI PROMPT FOR ID: ${promptId} ===`);
+
+ // Parse data contexts (JSON strings)
+ let parsedData = {};
+ let parsedTemplateData = {};
+
+ if (data) {
  try {
- LogStatus(`=== RUNNING AI PROMPT FOR ID: ${promptId} ===`);
-
- // Parse data contexts (JSON strings)
- let parsedData = {};
- let parsedTemplateData = {};
-
- if (data) {
- try {
- parsedData = JSON.parse(data);
- } catch (parseError) {
- return {
- success: false,
- error: `Invalid JSON in data: ${(parseError as Error).message}`,
- executionTimeMs: Date.now() - startTime
- };
- }
- }
+ parsedData = JSON.parse(data);
+ } catch (parseError) {
+ return {
+ success: false,
+ error: `Invalid JSON in data: ${(parseError as Error).message}`,
+ executionTimeMs: Date.now() - startTime,
+ };
+ }
+ }

- if (templateData) {
- try {
- parsedTemplateData = JSON.parse(templateData);
- } catch (parseError) {
- return {
- success: false,
- error: `Invalid JSON in template data: ${(parseError as Error).message}`,
- executionTimeMs: Date.now() - startTime
- };
- }
- }
+ if (templateData) {
+ try {
+ parsedTemplateData = JSON.parse(templateData);
+ } catch (parseError) {
+ return {
+ success: false,
+ error: `Invalid JSON in template data: ${(parseError as Error).message}`,
+ executionTimeMs: Date.now() - startTime,
+ };
+ }
+ }

- // Get current user from payload
- const currentUser = this.GetUserFromPayload(userPayload);
- if (!currentUser) {
- return {
- success: false,
- error: 'Unable to determine current user',
- executionTimeMs: Date.now() - startTime
- };
- }
+ // Get current user from payload
+ const currentUser = this.GetUserFromPayload(userPayload);
+ if (!currentUser) {
+ return {
+ success: false,
+ error: 'Unable to determine current user',
+ executionTimeMs: Date.now() - startTime,
+ };
+ }

- // Load the AI prompt entity
- const promptEntity = await p.GetEntityObject<AIPromptEntityExtended>('AI Prompts', currentUser);
- await promptEntity.Load(promptId);
-
- if (!promptEntity.IsSaved) {
- return {
- success: false,
- error: `AI Prompt with ID ${promptId} not found`,
- executionTimeMs: Date.now() - startTime
- };
- }
+ // Load the AI prompt entity
+ const promptEntity = await p.GetEntityObject<AIPromptEntityExtended>('AI Prompts', currentUser);
+ await promptEntity.Load(promptId);

- // Check if prompt is active
- if (promptEntity.Status !== 'Active') {
- return {
- success: false,
- error: `AI Prompt "${promptEntity.Name}" is not active (Status: ${promptEntity.Status})`,
- executionTimeMs: Date.now() - startTime
- };
- }
+ if (!promptEntity.IsSaved) {
+ return {
+ success: false,
+ error: `AI Prompt with ID ${promptId} not found`,
+ executionTimeMs: Date.now() - startTime,
+ };
+ }

- // Create AI prompt runner and execute
- const promptRunner = new AIPromptRunner();
-
- // Build execution parameters
- const promptParams = new AIPromptParams();
- promptParams.prompt = promptEntity;
- promptParams.data = parsedData;
- promptParams.templateData = parsedTemplateData;
- promptParams.configurationId = configurationId;
- promptParams.contextUser = currentUser;
- promptParams.skipValidation = skipValidation || false;
- promptParams.rerunFromPromptRunID = rerunFromPromptRunID;
- promptParams.systemPromptOverride = systemPromptOverride;
-
- // Set override if model or vendor ID provided
- if (overrideModelId || overrideVendorId) {
- promptParams.override = {
- modelId: overrideModelId,
- vendorId: overrideVendorId
- };
- }
-
- // Parse and set conversation messages if provided
- if (messages) {
- try {
- promptParams.conversationMessages = JSON.parse(messages);
- } catch (parseError) {
- // If parsing fails, treat as a simple user message
- promptParams.conversationMessages = [{
- role: 'user',
- content: messages
- }];
- }
- }
-
- // If responseFormat is provided, override the prompt's default response format
- if (responseFormat) {
- // We'll need to override the prompt's response format setting
- // This will be handled in the AIPromptRunner when it builds the ChatParams
- promptEntity.ResponseFormat = responseFormat as any;
- }
+ // Check if prompt is active
+ if (promptEntity.Status !== 'Active') {
+ return {
+ success: false,
+ error: `AI Prompt "${promptEntity.Name}" is not active (Status: ${promptEntity.Status})`,
+ executionTimeMs: Date.now() - startTime,
+ };
+ }
+
+ // Create AI prompt runner and execute
+ const promptRunner = new AIPromptRunner();
+
+ // Build execution parameters
+ const promptParams = new AIPromptParams();
+ promptParams.prompt = promptEntity;
+ promptParams.data = parsedData;
+ promptParams.templateData = parsedTemplateData;
+ promptParams.configurationId = configurationId;
+ promptParams.contextUser = currentUser;
+ promptParams.skipValidation = skipValidation || false;
+ promptParams.rerunFromPromptRunID = rerunFromPromptRunID;
+ promptParams.systemPromptOverride = systemPromptOverride;
+
+ // Set override if model or vendor ID provided
+ if (overrideModelId || overrideVendorId) {
+ promptParams.override = {
+ modelId: overrideModelId,
+ vendorId: overrideVendorId,
+ };
+ }

- // Build additional parameters for chat-specific settings
- const additionalParams: Record<string, any> = {};
- if (temperature != null) additionalParams.temperature = temperature;
- if (topP != null) additionalParams.topP = topP;
- if (topK != null) additionalParams.topK = topK;
- if (minP != null) additionalParams.minP = minP;
- if (frequencyPenalty != null) additionalParams.frequencyPenalty = frequencyPenalty;
- if (presencePenalty != null) additionalParams.presencePenalty = presencePenalty;
- if (seed != null) additionalParams.seed = seed;
- if (stopSequences != null) additionalParams.stopSequences = stopSequences;
- if (includeLogProbs != null) additionalParams.includeLogProbs = includeLogProbs;
- if (topLogProbs != null) additionalParams.topLogProbs = topLogProbs;
-
- // Only set additionalParameters if we have any
- if (Object.keys(additionalParams).length > 0) {
- promptParams.additionalParameters = additionalParams;
- }
+ // Parse and set conversation messages if provided
+ if (messages) {
+ try {
+ promptParams.conversationMessages = JSON.parse(messages);
+ } catch (parseError) {
+ // If parsing fails, treat as a simple user message
+ promptParams.conversationMessages = [
+ {
+ role: 'user',
+ content: messages,
+ },
+ ];
+ }
+ }
+
+ // If responseFormat is provided, override the prompt's default response format
+ if (responseFormat) {
+ // We'll need to override the prompt's response format setting
+ // This will be handled in the AIPromptRunner when it builds the ChatParams
+ promptEntity.ResponseFormat = responseFormat as any;
+ }
+
+ // Build additional parameters for chat-specific settings
+ const additionalParams: Record<string, any> = {};
+ if (temperature != null) additionalParams.temperature = temperature;
+ if (topP != null) additionalParams.topP = topP;
+ if (topK != null) additionalParams.topK = topK;
+ if (minP != null) additionalParams.minP = minP;
+ if (frequencyPenalty != null) additionalParams.frequencyPenalty = frequencyPenalty;
+ if (presencePenalty != null) additionalParams.presencePenalty = presencePenalty;
+ if (seed != null) additionalParams.seed = seed;
+ if (stopSequences != null) additionalParams.stopSequences = stopSequences;
+ if (includeLogProbs != null) additionalParams.includeLogProbs = includeLogProbs;
+ if (topLogProbs != null) additionalParams.topLogProbs = topLogProbs;
+
+ // Only set additionalParameters if we have any
+ if (Object.keys(additionalParams).length > 0) {
+ promptParams.additionalParameters = additionalParams;
+ }
+
+ // Execute the prompt
+ const result = await promptRunner.ExecutePrompt(promptParams);
+
+ const executionTime = Date.now() - startTime;
+
+ if (result.success) {
+ LogStatus(`=== AI PROMPT RUN COMPLETED FOR: ${promptEntity.Name} (${executionTime}ms) ===`);

- // Execute the prompt
- const result = await promptRunner.ExecutePrompt(promptParams);
-
- const executionTime = Date.now() - startTime;
-
- if (result.success) {
- LogStatus(`=== AI PROMPT RUN COMPLETED FOR: ${promptEntity.Name} (${executionTime}ms) ===`);
-
- return {
- success: true,
- output: result.rawResult,
- parsedResult: typeof result.result === 'string' ? result.result : JSON.stringify(result.result),
- rawResult: result.rawResult,
- executionTimeMs: executionTime,
- tokensUsed: result.tokensUsed,
- promptRunId: result.promptRun?.ID,
- validationResult: result.validationResult ? JSON.stringify(result.validationResult) : undefined,
- chatResult: result.chatResult ? JSON.stringify(result.chatResult) : undefined
- };
- } else {
- LogError(`AI Prompt run failed for ${promptEntity.Name}: ${result.errorMessage}`);
- return {
- success: false,
- error: result.errorMessage,
- executionTimeMs: executionTime,
- promptRunId: result.promptRun?.ID,
- chatResult: result.chatResult ? JSON.stringify(result.chatResult) : undefined
- };
- }
+ return {
+ success: true,
+ output: result.rawResult,
+ parsedResult: typeof result.result === 'string' ? result.result : JSON.stringify(result.result),
+ rawResult: result.rawResult,
+ executionTimeMs: executionTime,
+ tokensUsed: result.tokensUsed,
+ promptRunId: result.promptRun?.ID,
+ validationResult: result.validationResult ? JSON.stringify(result.validationResult) : undefined,
+ chatResult: result.chatResult ? JSON.stringify(result.chatResult) : undefined,
+ };
+ } else {
+ LogError(`AI Prompt run failed for ${promptEntity.Name}: ${result.errorMessage}`);
+ return {
+ success: false,
+ error: result.errorMessage,
+ executionTimeMs: executionTime,
+ promptRunId: result.promptRun?.ID,
+ chatResult: result.chatResult ? JSON.stringify(result.chatResult) : undefined,
+ };
+ }
+ } catch (error) {
+ const executionTime = Date.now() - startTime;
+ LogError(`AI Prompt run failed:`, undefined, error);
+ return {
+ success: false,
+ error: (error as Error).message || 'Unknown error occurred',
+ executionTimeMs: executionTime,
+ };
+ }
+ }
+
+ /**
+ * Public mutation for regular users to run AI prompts with authentication.
+ */
+ @Mutation(() => AIPromptRunResult)
+ async RunAIPrompt(
+ @Arg('promptId') promptId: string,
+ @Ctx() { userPayload, providers }: AppContext,
+ @Arg('data', { nullable: true }) data?: string,
+ @Arg('overrideModelId', { nullable: true }) overrideModelId?: string,
+ @Arg('overrideVendorId', { nullable: true }) overrideVendorId?: string,
+ @Arg('configurationId', { nullable: true }) configurationId?: string,
+ @Arg('skipValidation', { nullable: true }) skipValidation?: boolean,
+ @Arg('templateData', { nullable: true }) templateData?: string,
+ @Arg('responseFormat', { nullable: true }) responseFormat?: string,
+ @Arg('temperature', { nullable: true }) temperature?: number,
+ @Arg('topP', { nullable: true }) topP?: number,
+ @Arg('topK', () => Int, { nullable: true }) topK?: number,
+ @Arg('minP', { nullable: true }) minP?: number,
+ @Arg('frequencyPenalty', { nullable: true }) frequencyPenalty?: number,
+ @Arg('presencePenalty', { nullable: true }) presencePenalty?: number,
+ @Arg('seed', () => Int, { nullable: true }) seed?: number,
+ @Arg('stopSequences', () => [String], { nullable: true }) stopSequences?: string[],
+ @Arg('includeLogProbs', { nullable: true }) includeLogProbs?: boolean,
+ @Arg('topLogProbs', () => Int, { nullable: true }) topLogProbs?: number,
+ @Arg('messages', { nullable: true }) messages?: string,
+ @Arg('rerunFromPromptRunID', { nullable: true }) rerunFromPromptRunID?: string,
+ @Arg('systemPromptOverride', { nullable: true }) systemPromptOverride?: string
+ ): Promise<AIPromptRunResult> {
+ const p = GetReadWriteProvider(providers);
+ return this.executeAIPrompt(
+ p,
+ promptId,
+ userPayload,
+ data,
+ overrideModelId,
+ overrideVendorId,
+ configurationId,
+ skipValidation,
+ templateData,
+ responseFormat,
+ temperature,
+ topP,
+ topK,
+ minP,
+ frequencyPenalty,
+ presencePenalty,
+ seed,
+ stopSequences,
+ includeLogProbs,
+ topLogProbs,
+ messages,
+ rerunFromPromptRunID,
+ systemPromptOverride
+ );
+ }
+
+ /**
+ * System user query for running AI prompts with elevated privileges.
+ * Requires the @RequireSystemUser decorator to ensure only system users can access.
+ */
+ @RequireSystemUser()
+ @Query(() => AIPromptRunResult)
+ async RunAIPromptSystemUser(
+ @Arg('promptId') promptId: string,
+ @Ctx() { userPayload, providers }: AppContext,
+ @Arg('data', { nullable: true }) data?: string,
+ @Arg('overrideModelId', { nullable: true }) overrideModelId?: string,
+ @Arg('overrideVendorId', { nullable: true }) overrideVendorId?: string,
+ @Arg('configurationId', { nullable: true }) configurationId?: string,
+ @Arg('skipValidation', { nullable: true }) skipValidation?: boolean,
+ @Arg('templateData', { nullable: true }) templateData?: string,
+ @Arg('responseFormat', { nullable: true }) responseFormat?: string,
+ @Arg('temperature', { nullable: true }) temperature?: number,
+ @Arg('topP', { nullable: true }) topP?: number,
+ @Arg('topK', () => Int, { nullable: true }) topK?: number,
+ @Arg('minP', { nullable: true }) minP?: number,
+ @Arg('frequencyPenalty', { nullable: true }) frequencyPenalty?: number,
+ @Arg('presencePenalty', { nullable: true }) presencePenalty?: number,
+ @Arg('seed', () => Int, { nullable: true }) seed?: number,
+ @Arg('stopSequences', () => [String], { nullable: true }) stopSequences?: string[],
+ @Arg('includeLogProbs', { nullable: true }) includeLogProbs?: boolean,
+ @Arg('topLogProbs', () => Int, { nullable: true }) topLogProbs?: number,
+ @Arg('messages', { nullable: true }) messages?: string,
+ @Arg('rerunFromPromptRunID', { nullable: true }) rerunFromPromptRunID?: string,
+ @Arg('systemPromptOverride', { nullable: true }) systemPromptOverride?: string
+ ): Promise<AIPromptRunResult> {
+ const p = GetReadWriteProvider(providers);
+ return this.executeAIPrompt(
+ p,
+ promptId,
+ userPayload,
+ data,
+ overrideModelId,
+ overrideVendorId,
+ configurationId,
+ skipValidation,
+ templateData,
+ responseFormat,
+ temperature,
+ topP,
+ topK,
+ minP,
+ frequencyPenalty,
+ presencePenalty,
+ seed,
+ stopSequences,
+ includeLogProbs,
+ topLogProbs,
+ messages,
+ rerunFromPromptRunID,
+ systemPromptOverride
+ );
+ }
+
+ /**
+ * Helper method to select a model for simple prompt execution based on preferences or power level
+ * @private
+ */
+ private async selectModelForSimplePrompt(
+ preferredModels: string[] | undefined,
+ modelPower: string,
+ contextUser: any
+ ): Promise<AIModelEntityExtended> {
+ // Ensure AI Engine is configured
+ await AIEngine.Instance.Config(false, contextUser);
+
+ // Get all LLM models that have API keys
+ const allModels = AIEngine.Instance.Models.filter((m) => m.AIModelType?.trim().toLowerCase() === 'llm' && m.IsActive === true);
+
+ // Filter to only models with valid API keys
+ const modelsWithKeys: AIModelEntityExtended[] = [];
+ for (const model of allModels) {
+ const apiKey = GetAIAPIKey(model.DriverClass);
+ if (apiKey && apiKey.trim().length > 0) {
+ modelsWithKeys.push(model);
+ }
+ }
+
+ if (modelsWithKeys.length === 0) {
+ throw new Error('No AI models with valid API keys found');
+ }

- } catch (error) {
- const executionTime = Date.now() - startTime;
- LogError(`AI Prompt run failed:`, undefined, error);
- return {
- success: false,
- error: (error as Error).message || 'Unknown error occurred',
- executionTimeMs: executionTime
- };
+ // Try preferred models first if provided
+ if (preferredModels && preferredModels.length > 0) {
+ for (const preferred of preferredModels) {
+ const model = modelsWithKeys.find((m) => m.Name === preferred || m.APIName === preferred);
+ if (model) {
+ LogStatus(`Selected preferred model: ${model.Name}`);
+ return model;
  }
+ }
+ LogStatus('No preferred models available, falling back to power selection');
  }

- /**
- * Public mutation for regular users to run AI prompts with authentication.
- */
- @Mutation(() => AIPromptRunResult)
- async RunAIPrompt(
- @Arg('promptId') promptId: string,
- @Ctx() { userPayload, providers }: AppContext,
- @Arg('data', { nullable: true }) data?: string,
- @Arg('overrideModelId', { nullable: true }) overrideModelId?: string,
- @Arg('overrideVendorId', { nullable: true }) overrideVendorId?: string,
- @Arg('configurationId', { nullable: true }) configurationId?: string,
- @Arg('skipValidation', { nullable: true }) skipValidation?: boolean,
- @Arg('templateData', { nullable: true }) templateData?: string,
- @Arg('responseFormat', { nullable: true }) responseFormat?: string,
- @Arg('temperature', { nullable: true }) temperature?: number,
- @Arg('topP', { nullable: true }) topP?: number,
- @Arg('topK', () => Int, { nullable: true }) topK?: number,
- @Arg('minP', { nullable: true }) minP?: number,
- @Arg('frequencyPenalty', { nullable: true }) frequencyPenalty?: number,
- @Arg('presencePenalty', { nullable: true }) presencePenalty?: number,
- @Arg('seed', () => Int, { nullable: true }) seed?: number,
- @Arg('stopSequences', () => [String], { nullable: true }) stopSequences?: string[],
- @Arg('includeLogProbs', { nullable: true }) includeLogProbs?: boolean,
- @Arg('topLogProbs', () => Int, { nullable: true }) topLogProbs?: number,
- @Arg('messages', { nullable: true }) messages?: string,
- @Arg('rerunFromPromptRunID', { nullable: true }) rerunFromPromptRunID?: string,
- @Arg('systemPromptOverride', { nullable: true }) systemPromptOverride?: string
- ): Promise<AIPromptRunResult> {
- const p = GetReadWriteProvider(providers);
- return this.executeAIPrompt(
- p,
- promptId,
- userPayload,
- data,
- overrideModelId,
- overrideVendorId,
- configurationId,
- skipValidation,
- templateData,
- responseFormat,
- temperature,
- topP,
- topK,
- minP,
- frequencyPenalty,
- presencePenalty,
- seed,
- stopSequences,
- includeLogProbs,
- topLogProbs,
- messages,
- rerunFromPromptRunID,
- systemPromptOverride
- );
+ // Sort by PowerRank for power-based selection
+ modelsWithKeys.sort((a, b) => (b.PowerRank || 0) - (a.PowerRank || 0));
+
+ let selectedModel: AIModelEntityExtended;
+ switch (modelPower) {
+ case 'lowest':
+ selectedModel = modelsWithKeys[modelsWithKeys.length - 1];
+ break;
+ case 'highest':
+ selectedModel = modelsWithKeys[0];
+ break;
+ case 'medium':
+ default:
+ const midIndex = Math.floor(modelsWithKeys.length / 2);
+ selectedModel = modelsWithKeys[midIndex];
+ break;
  }

- /**
- * System user query for running AI prompts with elevated privileges.
- * Requires the @RequireSystemUser decorator to ensure only system users can access.
- */
- @RequireSystemUser()
- @Query(() => AIPromptRunResult)
- async RunAIPromptSystemUser(
- @Arg('promptId') promptId: string,
- @Ctx() { userPayload, providers }: AppContext,
- @Arg('data', { nullable: true }) data?: string,
- @Arg('overrideModelId', { nullable: true }) overrideModelId?: string,
- @Arg('overrideVendorId', { nullable: true }) overrideVendorId?: string,
- @Arg('configurationId', { nullable: true }) configurationId?: string,
- @Arg('skipValidation', { nullable: true }) skipValidation?: boolean,
- @Arg('templateData', { nullable: true }) templateData?: string,
- @Arg('responseFormat', { nullable: true }) responseFormat?: string,
- @Arg('temperature', { nullable: true }) temperature?: number,
- @Arg('topP', { nullable: true }) topP?: number,
- @Arg('topK', () => Int, { nullable: true }) topK?: number,
- @Arg('minP', { nullable: true }) minP?: number,
- @Arg('frequencyPenalty', { nullable: true }) frequencyPenalty?: number,
- @Arg('presencePenalty', { nullable: true }) presencePenalty?: number,
- @Arg('seed', () => Int, { nullable: true }) seed?: number,
- @Arg('stopSequences', () => [String], { nullable: true }) stopSequences?: string[],
- @Arg('includeLogProbs', { nullable: true }) includeLogProbs?: boolean,
- @Arg('topLogProbs', () => Int, { nullable: true }) topLogProbs?: number,
- @Arg('messages', { nullable: true }) messages?: string,
- @Arg('rerunFromPromptRunID', { nullable: true }) rerunFromPromptRunID?: string,
- @Arg('systemPromptOverride', { nullable: true }) systemPromptOverride?: string
- ): Promise<AIPromptRunResult> {
- const p = GetReadWriteProvider(providers);
- return this.executeAIPrompt(
- p,
- promptId,
- userPayload,
- data,
- overrideModelId,
- overrideVendorId,
- configurationId,
- skipValidation,
- templateData,
- responseFormat,
- temperature,
- topP,
- topK,
- minP,
- frequencyPenalty,
- presencePenalty,
- seed,
- stopSequences,
- includeLogProbs,
- topLogProbs,
- messages,
- rerunFromPromptRunID,
- systemPromptOverride
- );
+ LogStatus(`Selected model by power (${modelPower || 'medium'}): ${selectedModel.Name}`);
+ return selectedModel;
+ }
+
+ /**
+ * Helper method to select an embedding model by size
+ * @private
+ */
+ private selectEmbeddingModelBySize(modelSize: string): AIModelEntityExtended {
+ const localModels = AIEngine.Instance.LocalEmbeddingModels;
+
+ if (!localModels || localModels.length === 0) {
+ throw new Error('No local embedding models available');
  }

- /**
- * Helper method to select a model for simple prompt execution based on preferences or power level
- * @private
- */
- private async selectModelForSimplePrompt(
- preferredModels: string[] | undefined,
- modelPower: string,
- contextUser: any
- ): Promise<AIModelEntityExtended> {
- // Ensure AI Engine is configured
- await AIEngine.Instance.Config(false, contextUser);
-
- // Get all LLM models that have API keys
- const allModels = AIEngine.Instance.Models.filter(m =>
- m.AIModelType?.trim().toLowerCase() === 'llm' &&
- m.IsActive === true
- );
-
- // Filter to only models with valid API keys
- const modelsWithKeys: AIModelEntityExtended[] = [];
- for (const model of allModels) {
- const apiKey = GetAIAPIKey(model.DriverClass);
- if (apiKey && apiKey.trim().length > 0) {
- modelsWithKeys.push(model);
- }
- }
-
- if (modelsWithKeys.length === 0) {
- throw new Error('No AI models with valid API keys found');
- }
-
- // Try preferred models first if provided
- if (preferredModels && preferredModels.length > 0) {
- for (const preferred of preferredModels) {
- const model = modelsWithKeys.find(m =>
- m.Name === preferred ||
- m.APIName === preferred
- );
- if (model) {
- LogStatus(`Selected preferred model: ${model.Name}`);
- return model;
- }
- }
- LogStatus('No preferred models available, falling back to power selection');
- }
-
- // Sort by PowerRank for power-based selection
- modelsWithKeys.sort((a, b) => (b.PowerRank || 0) - (a.PowerRank || 0));
-
- let selectedModel: AIModelEntityExtended;
- switch (modelPower) {
- case 'lowest':
- selectedModel = modelsWithKeys[modelsWithKeys.length - 1];
- break;
- case 'highest':
- selectedModel = modelsWithKeys[0];
- break;
- case 'medium':
- default:
- const midIndex = Math.floor(modelsWithKeys.length / 2);
- selectedModel = modelsWithKeys[midIndex];
- break;
- }
-
- LogStatus(`Selected model by power (${modelPower || 'medium'}): ${selectedModel.Name}`);
- return selectedModel;
+ // Models are already sorted by PowerRank (highest first)
+ switch (modelSize) {
+ case 'small':
+ return localModels[localModels.length - 1]; // Lowest power
+ case 'medium':
+ default:
+ const midIndex = Math.floor(localModels.length / 2);
+ return localModels[midIndex] || localModels[0];
  }
-
- /**
- * Helper method to select an embedding model by size
- * @private
- */
- private selectEmbeddingModelBySize(modelSize: string): AIModelEntityExtended {
- const localModels = AIEngine.Instance.LocalEmbeddingModels;
-
- if (!localModels || localModels.length === 0) {
- throw new Error('No local embedding models available');
- }
-
- // Models are already sorted by PowerRank (highest first)
- switch (modelSize) {
- case 'small':
- return localModels[localModels.length - 1]; // Lowest power
- case 'medium':
- default:
- const midIndex = Math.floor(localModels.length / 2);
- return localModels[midIndex] || localModels[0];
- }
+ }
+
+ /**
+ * Helper method to build chat messages from system prompt and optional message history
+ * @private
+ */
+ private buildChatMessages(systemPrompt: string, messagesJson?: string): ChatMessage[] {
+ const messages: ChatMessage[] = [];
+
+ // Add system prompt
+ if (systemPrompt && systemPrompt.trim().length > 0) {
+ messages.push({
+ role: ChatMessageRole.system,
+ content: systemPrompt,
+ });
  }
-
- /**
- * Helper method to build chat messages from system prompt and optional message history
- * @private
- */
- private buildChatMessages(systemPrompt: string, messagesJson?: string): ChatMessage[] {
- const messages: ChatMessage[] = [];
-
- // Add system prompt
- if (systemPrompt && systemPrompt.trim().length > 0) {
- messages.push({
- role: ChatMessageRole.system,
- content: systemPrompt
- });
- }
-
- // Add message history if provided
- if (messagesJson) {
- try {
- const parsedMessages = JSON.parse(messagesJson);
- if (Array.isArray(parsedMessages)) {
- for (const msg of parsedMessages) {
- if (msg.message && msg.role) {
- messages.push({
- role: msg.role === 'user' ? ChatMessageRole.user : ChatMessageRole.assistant,
- content: msg.message
- });
- }
- }
- }
- else if (messagesJson?.length > 0) {
- // messages maybe just has a simple string in it so add
- // as a single message
- messages.push({
- role: ChatMessageRole.user,
- content: messagesJson
- });
- }
- } catch (e) {
- if (messagesJson?.length > 0) {
- // messages maybe just has a simple string in it so add
- // as a single message
- messages.push({
- role: ChatMessageRole.user,
- content: messagesJson
- });
- }
- LogError('Failed to parse messages JSON', undefined, e);
+
+ // Add message history if provided
+ if (messagesJson) {
+ try {
+ const parsedMessages = JSON.parse(messagesJson);
+ if (Array.isArray(parsedMessages)) {
+ for (const msg of parsedMessages) {
+ if (msg.message && msg.role) {
+ messages.push({
+ role: msg.role === 'user' ? ChatMessageRole.user : ChatMessageRole.assistant,
+ content: msg.message,
+ });
  }
+ }
+ } else if (messagesJson?.length > 0) {
+ // messages maybe just has a simple string in it so add
+ // as a single message
+ messages.push({
+ role: ChatMessageRole.user,
+ content: messagesJson,
+ });
  }
-
- return messages;
- }
-
- /**
- * Helper method to format simple prompt result
- * @private
- */
- private formatSimpleResult(chatResult: any, model: AIModelEntityExtended, executionTime: number): SimplePromptResult {
- if (!chatResult || !chatResult.success) {
- return {
- success: false,
- error: chatResult?.errorMessage || 'Unknown error occurred',
- modelName: model.Name,
- executionTimeMs: executionTime
- };
+ } catch (e) {
+ if (messagesJson?.length > 0) {
+ // messages maybe just has a simple string in it so add
+ // as a single message
+ messages.push({
+ role: ChatMessageRole.user,
+ content: messagesJson,
+ });
  }
-
- const resultContent = chatResult.data?.choices?.[0]?.message?.content || '';
-
- // Try to extract JSON from the result
- let resultObject: any = null;
+ LogError('Failed to parse messages JSON', undefined, e);
+ }
+ }
+
+ return messages;
+ }
+
+ /**
+ * Helper method to format simple prompt result
+ * @private
+ */
+ private formatSimpleResult(chatResult: any, model: AIModelEntityExtended, executionTime: number): SimplePromptResult {
+ if (!chatResult || !chatResult.success) {
+ return {
+ success: false,
+ error: chatResult?.errorMessage || 'Unknown error occurred',
+ modelName: model.Name,
+ executionTimeMs: executionTime,
+ };
+ }
+
+ const resultContent = chatResult.data?.choices?.[0]?.message?.content || '';
+
+ // Try to extract JSON from the result
+ let resultObject: any = null;
+ try {
+ // First try to parse the entire result as JSON
+ resultObject = JSON.parse(resultContent);
+ } catch (e) {
+ // Try to find JSON within the text
+ const jsonMatch = resultContent.match(/\{[\s\S]*\}|\[[\s\S]*\]/);
+ if (jsonMatch) {
  try {
- // First try to parse the entire result as JSON
- resultObject = JSON.parse(resultContent);
- } catch (e) {
- // Try to find JSON within the text
- const jsonMatch = resultContent.match(/\{[\s\S]*\}|\[[\s\S]*\]/);
- if (jsonMatch) {
- try {
- resultObject = JSON.parse(jsonMatch[0]);
- } catch (e2) {
- // No valid JSON found
- }
- }
+ resultObject = JSON.parse(jsonMatch[0]);
+ } catch (e2) {
+ // No valid JSON found
  }
-
+ }
+ }
+
+ return {
+ success: true,
+ result: resultContent,
+ resultObject: resultObject ? JSON.stringify(resultObject) : undefined,
+ modelName: model.Name,
+ executionTimeMs: executionTime,
+ };
+ }
+
+ /**
+ * Execute a simple prompt without requiring a stored AI Prompt entity.
+ * This is designed for interactive components that need quick AI responses.
+ */
+ @Mutation(() => SimplePromptResult)
+ async ExecuteSimplePrompt(
+ @Arg('systemPrompt') systemPrompt: string,
+ @Ctx() { userPayload }: { userPayload: UserPayload },
+ @Arg('messages', { nullable: true }) messages?: string,
+ @Arg('preferredModels', () => [String], { nullable: true }) preferredModels?: string[],
+ @Arg('modelPower', { nullable: true }) modelPower?: string,
+ @Arg('responseFormat', { nullable: true }) responseFormat?: string
+ ): Promise<SimplePromptResult> {
+ const startTime = Date.now();
+
+ try {
+ LogStatus(`=== EXECUTING SIMPLE PROMPT ===`);
+
+ // Get current user
+ const currentUser = this.GetUserFromPayload(userPayload);
+ if (!currentUser) {
  return {
- success: true,
- result: resultContent,
- resultObject: resultObject ? JSON.stringify(resultObject) : undefined,
- modelName: model.Name,
- executionTimeMs: executionTime
+ success: false,
+ error: 'Unable to determine current user',
+ modelName: 'Unknown',
+ executionTimeMs: Date.now() - startTime,
  };
+ }
+
+ // Select model based on preferences or power level
+ const model = await this.selectModelForSimplePrompt(preferredModels, modelPower || 'medium', currentUser);
+
+ // Build chat messages
+ const chatMessages = this.buildChatMessages(systemPrompt, messages);
+
+ if (chatMessages.length === 0) {
+ return {
+ success: false,
+ error: 'No messages to send to model',
+ modelName: model.Name,
+ executionTimeMs: Date.now() - startTime,
+ };
+ }
+
+ // Create LLM instance
+ const apiKey = GetAIAPIKey(model.DriverClass);
+ const llm = MJGlobal.Instance.ClassFactory.CreateInstance<BaseLLM>(BaseLLM, model.DriverClass, apiKey);
+
+ if (!llm) {
+ return {
+ success: false,
+ error: `Failed to create LLM instance for model ${model.Name}`,
+ modelName: model.Name,
+ executionTimeMs: Date.now() - startTime,
+ };
+ }
+
+ // Build chat parameters
+ const chatParams = new ChatParams();
+ chatParams.messages = chatMessages;
+ chatParams.model = model.APIName;
+
+ if (responseFormat) {
+ // Cast to valid response format type
+ chatParams.responseFormat = responseFormat as 'Any' | 'Text' | 'Markdown' | 'JSON' | 'ModelSpecific';
+ }
+
+ // Execute the chat completion
+ const result = await llm.ChatCompletion(chatParams);
+
+ const executionTime = Date.now() - startTime;
+ LogStatus(`=== SIMPLE PROMPT COMPLETED (${executionTime}ms) ===`);
+
+ // Format and return the result
+ return this.formatSimpleResult(result, model, executionTime);
+ } catch (error) {
+ const executionTime = Date.now() - startTime;
+ LogError('Simple prompt execution failed:', undefined, error);
+ return {
+ success: false,
+ error: (error as Error).message || 'Unknown error occurred',
+ modelName: 'Unknown',
+ executionTimeMs: executionTime,
+ };
  }
-
- /**
- * Execute a simple prompt without requiring a stored AI Prompt entity.
- * This is designed for interactive components that need quick AI responses.
- */
- @Mutation(() => SimplePromptResult)
- async ExecuteSimplePrompt(
- @Arg('systemPrompt') systemPrompt: string,
- @Ctx() { userPayload }: { userPayload: UserPayload },
- @Arg('messages', { nullable: true }) messages?: string,
- @Arg('preferredModels', () => [String], { nullable: true }) preferredModels?: string[],
- @Arg('modelPower', { nullable: true }) modelPower?: string,
- @Arg('responseFormat', { nullable: true }) responseFormat?: string
- ): Promise<SimplePromptResult> {
- const startTime = Date.now();
-
- try {
- LogStatus(`=== EXECUTING SIMPLE PROMPT ===`);
-
- // Get current user
- const currentUser = this.GetUserFromPayload(userPayload);
- if (!currentUser) {
- return {
- success: false,
- error: 'Unable to determine current user',
- modelName: 'Unknown',
- executionTimeMs: Date.now() - startTime
- };
- }
-
- // Select model based on preferences or power level
- const model = await this.selectModelForSimplePrompt(
- preferredModels,
- modelPower || 'medium',
- currentUser
- );
-
- // Build chat messages
- const chatMessages = this.buildChatMessages(systemPrompt, messages);
-
- if (chatMessages.length === 0) {
- return {
- success: false,
- error: 'No messages to send to model',
- modelName: model.Name,
- executionTimeMs: Date.now() - startTime
- };
- }
-
- // Create LLM instance
- const apiKey = GetAIAPIKey(model.DriverClass);
- const llm = MJGlobal.Instance.ClassFactory.CreateInstance<BaseLLM>(
- BaseLLM,
- model.DriverClass,
- apiKey
- );
-
- if (!llm) {
- return {
- success: false,
- error: `Failed to create LLM instance for model ${model.Name}`,
- modelName: model.Name,
- executionTimeMs: Date.now() - startTime
- };
- }
-
- // Build chat parameters
- const chatParams = new ChatParams();
- chatParams.messages = chatMessages;
- chatParams.model = model.APIName;
-
- if (responseFormat) {
- // Cast to valid response format type
- chatParams.responseFormat = responseFormat as 'Any' | 'Text' | 'Markdown' | 'JSON' | 'ModelSpecific';
- }
-
- // Execute the chat completion
- const result = await llm.ChatCompletion(chatParams);
-
- const executionTime = Date.now() - startTime;
- LogStatus(`=== SIMPLE PROMPT COMPLETED (${executionTime}ms) ===`);
-
- // Format and return the result
- return this.formatSimpleResult(result, model, executionTime);
-
- } catch (error) {
- const executionTime = Date.now() - startTime;
- LogError('Simple prompt execution failed:', undefined, error);
- return {
- success: false,
- error: (error as Error).message || 'Unknown error occurred',
- modelName: 'Unknown',
- executionTimeMs: executionTime
- };
+ }
+
+ /**
+ * System user query for executing simple prompts with elevated privileges
+ */
+ @RequireSystemUser()
+ @Query(() => SimplePromptResult)
+ async ExecuteSimplePromptSystemUser(
+ @Arg('systemPrompt') systemPrompt: string,
+ @Ctx() { userPayload }: { userPayload: UserPayload },
+ @Arg('messages', { nullable: true }) messages?: string,
+ @Arg('preferredModels', () => [String], { nullable: true }) preferredModels?: string[],
+ @Arg('modelPower', { nullable: true }) modelPower?: string,
+ @Arg('responseFormat', { nullable: true }) responseFormat?: string
+ ): Promise<SimplePromptResult> {
+ // Reuse the same logic as the regular mutation
+ return this.ExecuteSimplePrompt(systemPrompt, { userPayload }, messages, preferredModels, modelPower, responseFormat);
+ }
+
+ /**
+ * Generate embeddings for text using local embedding models.
+ * Designed for interactive components that need fast similarity calculations.
+ */
+ @Mutation(() => EmbedTextResult)
+ async EmbedText(
+ @Arg('textToEmbed', () => [String]) textToEmbed: string[],
+ @Arg('modelSize') modelSize: string,
+ @Ctx() { userPayload }: { userPayload: UserPayload }
+ ): Promise<EmbedTextResult> {
+ try {
+ LogStatus(`=== GENERATING EMBEDDINGS for ${textToEmbed.length} text(s) ===`);
+
+ // Get current user
+ const currentUser = this.GetUserFromPayload(userPayload);
+ if (!currentUser) {
+ return {
+ embeddings: [],
+ modelName: 'Unknown',
+ vectorDimensions: 0,
+ error: 'Unable to determine current user',
+ };
+ }
+
+ // Ensure AI Engine is configured
+ await AIEngine.Instance.Config(false, currentUser);
+
+ // Select embedding model by size
+ const model = this.selectEmbeddingModelBySize(modelSize);
+
+ LogStatus(`Using embedding model: ${model.Name}`);
+
+ // Process embeddings
+ const embeddings: number[][] = [];
+
+ for (const text of textToEmbed) {
+ if (!text || text.trim().length === 0) {
+ // Return zero vector for empty text
+ embeddings.push([]);
+ continue;
  }
- }
-
- /**
- * System user query for executing simple prompts with elevated privileges
677
- */
678
- @RequireSystemUser()
679
- @Query(() => SimplePromptResult)
680
- async ExecuteSimplePromptSystemUser(
681
- @Arg('systemPrompt') systemPrompt: string,
682
- @Ctx() { userPayload }: { userPayload: UserPayload },
683
- @Arg('messages', { nullable: true }) messages?: string,
684
- @Arg('preferredModels', () => [String], { nullable: true }) preferredModels?: string[],
685
- @Arg('modelPower', { nullable: true }) modelPower?: string,
686
- @Arg('responseFormat', { nullable: true }) responseFormat?: string
687
- ): Promise<SimplePromptResult> {
688
- // Reuse the same logic as the regular mutation
689
- return this.ExecuteSimplePrompt(systemPrompt, { userPayload }, messages, preferredModels, modelPower, responseFormat);
690
- }
691
-
692
- /**
693
- * Generate embeddings for text using local embedding models.
694
- * Designed for interactive components that need fast similarity calculations.
695
- */
696
- @Mutation(() => EmbedTextResult)
697
- async EmbedText(
698
- @Arg('textToEmbed', () => [String]) textToEmbed: string[],
699
- @Arg('modelSize') modelSize: string,
700
- @Ctx() { userPayload }: { userPayload: UserPayload }
701
- ): Promise<EmbedTextResult> {
702
- try {
703
- LogStatus(`=== GENERATING EMBEDDINGS for ${textToEmbed.length} text(s) ===`);
704
-
705
- // Get current user
706
- const currentUser = this.GetUserFromPayload(userPayload);
707
- if (!currentUser) {
708
- return {
709
- embeddings: [],
710
- modelName: 'Unknown',
711
- vectorDimensions: 0,
712
- error: 'Unable to determine current user'
713
- };
714
- }
715
-
716
- // Ensure AI Engine is configured
717
- await AIEngine.Instance.Config(false, currentUser);
718
-
719
- // Select embedding model by size
720
- const model = this.selectEmbeddingModelBySize(modelSize);
721
-
722
- LogStatus(`Using embedding model: ${model.Name}`);
723
-
724
- // Process embeddings
725
- const embeddings: number[][] = [];
726
-
727
- for (const text of textToEmbed) {
728
- if (!text || text.trim().length === 0) {
729
- // Return zero vector for empty text
730
- embeddings.push([]);
731
- continue;
732
- }
733
-
734
- // Use AIEngine's EmbedText method
735
- const result = await AIEngine.Instance.EmbedText(model, text);
736
-
737
- if (result && result.vector && result.vector.length > 0) {
738
- embeddings.push(result.vector);
739
- } else {
740
- LogError(`Failed to generate embedding for text: ${text.substring(0, 50)}...`);
741
- embeddings.push([]); // Add empty array for failed embeddings
742
- }
743
- }
744
-
745
- // Get vector dimensions from first successful embedding
746
- const vectorDimensions = embeddings.find(e => e.length > 0)?.length || 0;
747
-
748
- LogStatus(`=== EMBEDDINGS GENERATED: ${embeddings.length} vectors of ${vectorDimensions} dimensions ===`);
749
-
750
- return {
751
- embeddings,
752
- modelName: model.Name,
753
- vectorDimensions,
754
- error: undefined
755
- };
756
-
757
- } catch (error) {
758
- LogError('Embedding generation failed:', undefined, error);
759
- return {
760
- embeddings: [],
761
- modelName: 'Unknown',
762
- vectorDimensions: 0,
763
- error: (error as Error).message || 'Unknown error occurred'
764
- };
718
+
719
+ // Use AIEngine's EmbedText method
720
+ const result = await AIEngine.Instance.EmbedText(model, text);
721
+
722
+ if (result && result.vector && result.vector.length > 0) {
723
+ embeddings.push(result.vector);
724
+ } else {
725
+ LogError(`Failed to generate embedding for text: ${text.substring(0, 50)}...`);
726
+ embeddings.push([]); // Add empty array for failed embeddings
765
727
  }
728
+ }
729
+
730
+ // Get vector dimensions from first successful embedding
731
+ const vectorDimensions = embeddings.find((e) => e.length > 0)?.length || 0;
732
+
733
+ LogStatus(`=== EMBEDDINGS GENERATED: ${embeddings.length} vectors of ${vectorDimensions} dimensions ===`);
734
+
735
+ return {
736
+ embeddings,
737
+ modelName: model.Name,
738
+ vectorDimensions,
739
+ error: undefined,
740
+ };
741
+ } catch (error) {
742
+ LogError('Embedding generation failed:', undefined, error);
743
+ return {
744
+ embeddings: [],
745
+ modelName: 'Unknown',
746
+ vectorDimensions: 0,
747
+ error: (error as Error).message || 'Unknown error occurred',
748
+ };
766
749
  }
767
-
768
- /**
769
- * System user query for generating embeddings with elevated privileges
770
- */
771
- @RequireSystemUser()
772
- @Query(() => EmbedTextResult)
773
- async EmbedTextSystemUser(
774
- @Arg('textToEmbed', () => [String]) textToEmbed: string[],
775
- @Arg('modelSize') modelSize: string,
776
- @Ctx() { userPayload }: { userPayload: UserPayload }
777
- ): Promise<EmbedTextResult> {
778
- // Reuse the same logic as the regular mutation
779
- return this.EmbedText(textToEmbed, modelSize, { userPayload });
780
- }
781
- }
750
+ }
751
+
752
+ /**
753
+ * System user query for generating embeddings with elevated privileges
754
+ */
755
+ @RequireSystemUser()
756
+ @Query(() => EmbedTextResult)
757
+ async EmbedTextSystemUser(
758
+ @Arg('textToEmbed', () => [String]) textToEmbed: string[],
759
+ @Arg('modelSize') modelSize: string,
760
+ @Ctx() { userPayload }: { userPayload: UserPayload }
761
+ ): Promise<EmbedTextResult> {
762
+ // Reuse the same logic as the regular mutation
763
+ return this.EmbedText(textToEmbed, modelSize, { userPayload });
764
+ }
765
+ }
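
The new EmbedText mutation returns an EmbedTextResult with one vector per input string and substitutes an empty array for blank or failed inputs, and its doc comment targets fast similarity calculations in interactive components. The sketch below shows one way a caller might rank candidate strings against a query with cosine similarity over those vectors; it is illustrative only, and the embedTexts helper and the 'small' modelSize value are assumptions, not part of the package.

    // Illustrative only: embedTexts stands in for a client helper that calls the
    // EmbedText mutation and returns the embeddings array from EmbedTextResult;
    // the 'small' modelSize value is a placeholder, not a confirmed option.
    function cosineSimilarity(a: number[], b: number[]): number {
      // Empty vectors come back for blank or failed inputs; treat them as zero similarity.
      if (a.length === 0 || b.length === 0 || a.length !== b.length) return 0;
      let dot = 0;
      let normA = 0;
      let normB = 0;
      for (let i = 0; i < a.length; i++) {
        dot += a[i] * b[i];
        normA += a[i] * a[i];
        normB += b[i] * b[i];
      }
      const denom = Math.sqrt(normA) * Math.sqrt(normB);
      return denom === 0 ? 0 : dot / denom;
    }

    async function rankBySimilarity(
      query: string,
      candidates: string[],
      embedTexts: (texts: string[], modelSize: string) => Promise<number[][]>
    ): Promise<Array<{ text: string; score: number }>> {
      // One round trip embeds the query and all candidates together.
      const [queryVector, ...candidateVectors] = await embedTexts([query, ...candidates], 'small');
      return candidates
        .map((text, i) => ({ text, score: cosineSimilarity(queryVector, candidateVectors[i]) }))
        .sort((a, b) => b.score - a.score);
    }

Checking for zero-length vectors before scoring keeps the placeholder arrays the resolver emits for failed embeddings from skewing the ranking.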