@memberjunction/server 1.0.1 → 1.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@memberjunction/server",
-  "version": "1.0.1",
+  "version": "1.0.4",
   "description": "MemberJunction: This project provides API access via GraphQL to the common data store.",
   "main": "dist/index.js",
   "types": "src/index.ts",
@@ -21,17 +21,18 @@
   "dependencies": {
     "@apollo/server": "^4.9.1",
     "@graphql-tools/utils": "^10.0.1",
-    "@memberjunction/ai": "^1.0.1",
-    "@memberjunction/aiengine": "^1.0.1",
-    "@memberjunction/core": "^1.0.1",
-    "@memberjunction/core-entities": "^1.0.1",
-    "@memberjunction/data-context": "^1.0.1",
-    "@memberjunction/data-context-server": "^1.0.1",
-    "@memberjunction/global": "^1.0.1",
-    "@memberjunction/storage": "^1.0.1",
-    "@memberjunction/queue": "^1.0.1",
-    "@memberjunction/sqlserver-dataprovider": "^1.0.1",
-    "@memberjunction/skip-types": "^1.0.1",
+    "@memberjunction/ai": "^1.0.4",
+    "@memberjunction/ai-openai": "^1.0.4",
+    "@memberjunction/aiengine": "^1.0.4",
+    "@memberjunction/core": "^1.0.4",
+    "@memberjunction/core-entities": "^1.0.4",
+    "@memberjunction/data-context": "^1.0.4",
+    "@memberjunction/data-context-server": "^1.0.4",
+    "@memberjunction/global": "^1.0.4",
+    "@memberjunction/storage": "^1.0.4",
+    "@memberjunction/queue": "^1.0.4",
+    "@memberjunction/sqlserver-dataprovider": "^1.0.4",
+    "@memberjunction/skip-types": "^1.0.4",
     "@types/cors": "^2.8.13",
     "@types/jsonwebtoken": "^8.5.9",
     "@types/node": "^18.11.14",
@@ -1,8 +1,10 @@
-import { MJGlobal, RegisterClass } from "@memberjunction/global";
+import { CleanJSON, MJGlobal, RegisterClass } from "@memberjunction/global";
 import { BaseEntity, EntityInfo, LogError, Metadata } from "@memberjunction/core";
-import { AIModelEntity, UserViewEntityExtended } from '@memberjunction/core-entities'
+import { AIModelEntity, AIModelEntityExtended, UserViewEntityExtended } from '@memberjunction/core-entities'
 import { BaseLLM, ChatParams, GetAIAPIKey } from "@memberjunction/ai";
 import { AIEngine } from "@memberjunction/aiengine";
+import { LoadOpenAILLM } from "@memberjunction/ai-openai";
+LoadOpenAILLM(); // this is to prevent tree shaking since the openai package is not directly used and rather instantiated dynamically in the LoadOpenAILLM function. Since no static code path exists tree shaking can result in this class being optimized out
 
 @RegisterClass(BaseEntity, 'User Views', 3) // high priority to ensure this is used ahead of the UserViewEntityExtended in the @memberjunction/core-entities package (which has priority of 2)
 export class UserViewEntity_Server extends UserViewEntityExtended {
@@ -14,12 +16,24 @@ export class UserViewEntity_Server extends UserViewEntityExtended {
     }
 
     /**
-     * Default implementation simply grabs the first AI model that is of type 1 (Language Model). If you want to override this to use a different model you can override this method in your subclass and return the model you want to use.
+     * Default implementation simply returns 'OpenAI' - override this in your subclass if you are using a different AI vendor.
      * @returns
      */
-    protected GetAIModel(): AIModelEntity {
-        const model = AIEngine.Models.find(m => m.AIModelTypeID === 1/*elim this hardcoding by adding virtual field for Type to AI Models entity*/) // get the first llm
-        return model;
+    protected get AIVendorName(): string {
+        return 'OpenAI';
+    }
+
+    /**
+     * Default implementation simply grabs the first AI model that matches GetAIModelName().
+     * @returns
+     */
+    protected async GetAIModel(): Promise<AIModelEntityExtended> {
+        await AIEngine.LoadAIMetadata(this.ContextCurrentUser); // most of the time this is already loaded, but just in case it isn't we will load it here
+        const models = AIEngine.Models.filter(m => m.AIModelType.trim().toLowerCase() === 'llm' &&
+                                                   m.Vendor.trim().toLowerCase() === this.AIVendorName.trim().toLowerCase())
+        // next, sort the models by the PowerRank field so that the highest power rank model is the first array element
+        models.sort((a, b) => b.PowerRank - a.PowerRank); // highest power rank first
+        return models[0];
     }
 
     /**
@@ -29,11 +43,11 @@ export class UserViewEntity_Server extends UserViewEntityExtended {
     */
    public async GenerateSmartFilterWhereClause(prompt: string, entityInfo: EntityInfo): Promise<{whereClause: string, userExplanation: string}> {
        try {
-           const model = this.GetAIModel();
+           const model = await this.GetAIModel();
            const llm = MJGlobal.Instance.ClassFactory.CreateInstance<BaseLLM>(BaseLLM, model.DriverClass, GetAIAPIKey(model.DriverClass));
 
            const chatParams: ChatParams = {
-               model: 'gpt-4',
+               model: model.APINameOrName,
                messages: [
                    {
                        role: 'system',
@@ -51,7 +65,11 @@ export class UserViewEntity_Server extends UserViewEntityExtended {
            if (llmResponse) {
                // try to parse it as JSON
                try {
-                   const parsed = JSON.parse(llmResponse);
+                   const cleansed = CleanJSON(llmResponse);
+                   if (!cleansed)
+                       throw new Error('Invalid JSON response from AI: ' + llmResponse);
+
+                   const parsed = JSON.parse(cleansed);
                    if (parsed.whereClause && parsed.whereClause.length > 0) {
                        // we have the where clause. Sometimes the LLM prefixes it with WHERE and somtimes not, we need to strip WHERE if it is there
                        const trimmed = parsed.whereClause.trim();
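
Taken together, the changes above make model selection an extension point: the new AIVendorName getter names the vendor, and GetAIModel() filters AIEngine.Models to that vendor's LLMs and returns the one with the highest PowerRank. The updated doc comment invites overriding the getter in a subclass when a different vendor is in use. A minimal sketch of such an override, assuming UserViewEntity_Server is importable from this package and that 'Anthropic' exists as a Vendor value on the AI Models entity (both are hypothetical here):

import { BaseEntity } from "@memberjunction/core";
import { RegisterClass } from "@memberjunction/global";
import { UserViewEntity_Server } from "@memberjunction/server"; // assumed import path

// Hypothetical subclass: register with a priority above 3 so the class factory prefers it over
// UserViewEntity_Server; GetAIModel() will then pick this vendor's highest-PowerRank LLM.
@RegisterClass(BaseEntity, 'User Views', 4)
export class UserViewEntity_AnthropicServer extends UserViewEntity_Server {
    protected override get AIVendorName(): string {
        return 'Anthropic'; // must match the Vendor field on the corresponding AI Models records
    }
}
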
@@ -2,7 +2,7 @@
 * ALL ENTITIES - TypeGraphQL Type Class Definition - AUTO GENERATED FILE
 * Generated Entities and Resolvers for Server
 *
-* GENERATED: 4/3/2024, 5:30:40 PM
+* GENERATED: 4/8/2024, 7:27:10 PM
 *
 * >>> DO NOT MODIFY THIS FILE!!!!!!!!!!!!
 * >>> YOUR CHANGES WILL BE OVERWRITTEN
@@ -7101,6 +7101,10 @@ export class ListDetail_ {
 
     @Field(() => Int)
     Sequence: number;
+
+    @Field()
+    @MaxLength(200)
+    List: string;
 
 }
 
@@ -8821,6 +8825,10 @@ export class Authorization_ {
     @Field()
     @MaxLength(8)
     UpdatedAt: Date;
+
+    @Field({nullable: true})
+    @MaxLength(200)
+    Parent?: string;
 
     @Field(() => [mj_core_schema_server_object_types.AuthorizationRole_])
     AuthorizationRolesArray: mj_core_schema_server_object_types.AuthorizationRole_[]; // Link to AuthorizationRoles
@@ -9266,6 +9274,9 @@ export class AIModel_ {
     @Field(() => Int)
     AIModelTypeID: number;
 
+    @Field(() => Boolean)
+    IsActive: boolean;
+
     @Field({nullable: true})
     Description?: string;
 
@@ -9277,8 +9288,12 @@ export class AIModel_ {
     @MaxLength(510)
     DriverImportPath?: string;
 
-    @Field(() => Boolean)
-    IsActive: boolean;
+    @Field({nullable: true, description: 'The name of the model to use with API calls which might differ from the Name, if APIName is not provided, Name will be used for API calls'})
+    @MaxLength(200)
+    APIName?: string;
+
+    @Field(() => Int, {nullable: true, description: 'A simplified power rank of each model for a given AI Model Type. For example, if we have GPT 3, GPT 3.5, and GPT 4, we would have a PowerRank of 1 for GPT3, 2 for GPT 3.5, and 3 for GPT 4. This can be used within model families like OpenAI or across all models. For example if you had Llama 2 in the mix which is similar to GPT 3.5 it would also have a PowerRank of 2. This can be used at runtime to pick the most/least powerful or compare model relative power.'})
+    PowerRank?: number;
 
     @Field()
     @MaxLength(8)
@@ -9287,6 +9302,10 @@ export class AIModel_ {
     @Field()
     @MaxLength(8)
     UpdatedAt: Date;
+
+    @Field()
+    @MaxLength(100)
+    AIModelType: string;
 
     @Field(() => [mj_core_schema_server_object_types.AIAction_])
     AIActionsArray: mj_core_schema_server_object_types.AIAction_[]; // Link to AIActions
@@ -9319,6 +9338,9 @@ export class UpdateAIModelInput {
     @Field(() => Int)
     AIModelTypeID: number;
 
+    @Field(() => Boolean)
+    IsActive: boolean;
+
     @Field({ nullable: true })
     Description: string;
 
@@ -9328,8 +9350,11 @@ export class UpdateAIModelInput {
     @Field({ nullable: true })
     DriverImportPath: string;
 
-    @Field(() => Boolean)
-    IsActive: boolean;
+    @Field({ nullable: true })
+    APIName: string;
+
+    @Field(() => Int, { nullable: true })
+    PowerRank: number;
 }
 
 //****************************************************************************
@@ -10569,6 +10594,10 @@ export class QueueTask_ {
 
     @Field({nullable: true})
     Comments?: string;
+
+    @Field()
+    @MaxLength(100)
+    Queue: string;
 
 }
 
@@ -11327,6 +11356,10 @@ export class Report_ {
     @Field({nullable: true})
     @MaxLength(510)
     OutputEvent?: string;
+
+    @Field({nullable: true})
+    @MaxLength(200)
+    OutputWorkflow?: string;
 
     @Field(() => [mj_core_schema_server_object_types.ReportSnapshot_])
     ReportSnapshotsArray: mj_core_schema_server_object_types.ReportSnapshot_[]; // Link to ReportSnapshots
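
The resolver change earlier in this diff swaps the hard-coded 'gpt-4' for model.APINameOrName, which pairs with the APIName field added to AIModel_ above: per the field description, API calls should use APIName when it is set and otherwise fall back to Name. A small sketch of that fallback, assuming the accessor on AIModelEntityExtended behaves roughly like this (hypothetical shape, not the actual @memberjunction/core-entities implementation):

// Hypothetical illustration of the APIName-vs-Name fallback described on the APIName field.
class AIModelNameExample {
    constructor(public Name: string, public APIName?: string) {}

    // Prefer the vendor-facing APIName; fall back to the display Name when APIName is not set.
    get APINameOrName(): string {
        return this.APIName && this.APIName.trim().length > 0 ? this.APIName : this.Name;
    }
}

// e.g. new AIModelNameExample('GPT 4', 'gpt-4').APINameOrName yields 'gpt-4'
//      new AIModelNameExample('GPT 4').APINameOrName yields 'GPT 4'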