@memberjunction/ai-mistral 0.9.12 → 0.9.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/config.d.ts +1 -0
- package/dist/config.js +10 -0
- package/dist/config.js.map +1 -0
- package/dist/models/mistral.d.ts +9 -108
- package/dist/models/mistral.js +59 -111
- package/dist/models/mistral.js.map +1 -1
- package/dist/models/mistralClient.d.ts +181 -0
- package/dist/models/mistralClient.js +155 -0
- package/dist/models/mistralClient.js.map +1 -0
- package/package.json +4 -5
package/dist/config.d.ts
ADDED
@@ -0,0 +1 @@
+ export declare const mistralAPIKey: string;
package/dist/config.js
ADDED
@@ -0,0 +1,10 @@
+ "use strict";
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.mistralAPIKey = void 0;
+ const dotenv_1 = __importDefault(require("dotenv"));
+ dotenv_1.default.config();
+ exports.mistralAPIKey = process.env.MISTRAL_API_KEY;
+ //# sourceMappingURL=config.js.map
package/dist/config.js.map
ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"config.js","sourceRoot":"","sources":["../src/config.ts"],"names":[],"mappings":";;;;;;AAAA,oDAA4B;AAC5B,gBAAM,CAAC,MAAM,EAAE,CAAC;AAEH,QAAA,aAAa,GAAW,OAAO,CAAC,GAAG,CAAC,eAAe,CAAC"}
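In practice the new config module just loads dotenv and exposes the key. A minimal sketch of consuming it (illustrative only; the relative import paths are assumptions, not part of this diff):

    // Assumes a local .env file containing MISTRAL_API_KEY=<your key>.
    import { mistralAPIKey } from './config';        // set from process.env.MISTRAL_API_KEY after dotenv.config()
    import { MistralLLM } from './models/mistral';   // hypothetical import path within the package

    const llm = new MistralLLM(mistralAPIKey);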
package/dist/models/mistral.d.ts
CHANGED
@@ -1,119 +1,20 @@
- import { BaseLLM,
+ import { BaseLLM, ChatParams, ChatResult, ClassifyParams, ClassifyResult, EmbedParams, EmbedResult, SummarizeParams, SummarizeResult } from '@memberjunction/ai';
+ import { EmbeddingResponse, ListModelsResponse, MistralClient } from './mistralClient';
  export declare class MistralLLM extends BaseLLM {
-
-     private enableSafePrompt;
+     static _client: MistralClient;
      constructor(apiKey: string);
-
+     get client(): MistralClient;
+     ChatCompletion(params: MistralChatParams): Promise<ChatResult>;
      SummarizeText(params: SummarizeParams): Promise<SummarizeResult>;
      ClassifyText(params: ClassifyParams): Promise<ClassifyResult>;
-
-
-      * @param prompt
-      */
-     Ask(prompt: string): Promise<void>;
+     createEmbedding(model: string, text: string): Promise<EmbeddingResponse>;
+     EmbedText(params: EmbedParams): Promise<EmbedResult>;
      /**
       * Returns a list of available models
       * @returns {Promise<AvailableModelInfo>}
       */
-
-     ChatSingle(message: string, role?: string, model?: string, temperature?: number, maxTokens?: number, topP?: number, randomSeed?: number, safePrompt?: boolean): Promise<ChatCompletionResponse>;
-     Chat(messages: ChatMessage[], model?: string, temperature?: number, maxTokens?: number, topP?: number, randomSeed?: number, safePrompt?: boolean): Promise<ChatCompletionResponse>;
-     private MakeChatCompletionRequest;
-     private createAxiosRequestConfig;
-     private callApi;
-     createChatMessages(prompts: string[], role?: string): ChatMessage[];
- }
- export declare const MistralRoles: {
-     User: string;
-     System: string;
- };
- export declare const MistralModels: {
-     Tiny: string;
-     Small: string;
-     Medium: string;
- };
- export interface ModelPermission {
-     id: string;
-     object: 'model_permission';
-     created: number;
-     allow_create_engine: boolean;
-     allow_sampling: boolean;
-     allow_logprobs: boolean;
-     allow_search_indices: boolean;
-     allow_view: boolean;
-     allow_fine_tuning: boolean;
-     organization: string;
-     group: string | null;
-     is_blocking: boolean;
- }
- export interface Model {
-     id: string;
-     object: 'model';
-     created: number;
-     owned_by: string;
-     root: string | null;
-     parent: string | null;
-     permission: ModelPermission[];
- }
- export interface ListModelsResponse {
-     object: 'list';
-     data: Model[];
- }
- export interface TokenUsage {
-     prompt_tokens: number;
-     completion_tokens: number;
-     total_tokens: number;
- }
- export type ChatCompletetionRequest = {
-     model: string;
-     messages: ChatMessage[];
-     temperature: number;
-     max_tokens: number;
-     top_p: number;
-     random_seed: number;
-     stream: boolean;
-     safe_prompt: boolean;
- };
- export interface ChatCompletionResponseChoice {
-     index: number;
-     message: {
-         role: string;
-         content: string;
-     };
-     finish_reason: string;
- }
- export interface ChatCompletionResponseChunkChoice {
-     index: number;
-     delta: {
-         role?: string;
-         content?: string;
-     };
-     finish_reason: string;
- }
- export interface ChatCompletionResponse {
-     id: string;
-     object: 'chat.completion';
-     created: number;
-     model: string;
-     choices: ChatCompletionResponseChoice[];
-     usage: TokenUsage;
- }
- export interface ChatCompletionResponseChunk {
-     id: string;
-     object: 'chat.completion.chunk';
-     created: number;
-     model: string;
-     choices: ChatCompletionResponseChunkChoice[];
- }
- export interface Embedding {
-     id: string;
-     object: 'embedding';
-     embedding: number[];
+     listModels(): Promise<ListModelsResponse>;
  }
- export
-     id: string;
-     object: 'list';
-     data: Embedding[];
+ export declare class MistralChatParams extends ChatParams {
      model: string;
-     usage: TokenUsage;
  }
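The declarations above replace the old Ask/ChatSingle/Chat helpers with the standard BaseLLM surface backed by a shared MistralClient. A minimal usage sketch against these signatures (illustrative only; the import paths, model names, and prompt are assumptions, not part of the published diff; the result shape follows dist/models/mistral.js):

    import { EmbedParams } from '@memberjunction/ai';
    import { MistralLLM, MistralChatParams } from './models/mistral'; // hypothetical relative import

    async function demo(): Promise<void> {
        const llm = new MistralLLM(process.env.MISTRAL_API_KEY ?? '');

        const params = new MistralChatParams();            // assumes a no-arg constructor
        params.model = 'mistral-small';                     // assumed model name
        params.messages = [{ role: 'user', content: 'What is the best French cheese?' }];

        const result = await llm.ChatCompletion(params);
        if (result.success) {
            console.log(result.data.choices[0].message.content);
            console.log(result.data.usage.totalTokens);
        }

        // EmbedText delegates to MistralClient.embeddings and defaults the model to "mistral-embed".
        const ep = new EmbedParams();                       // assumes a no-arg constructor
        ep.model = 'mistral-embed';
        ep.text = 'What is the best French cheese?';
        const embed = await llm.EmbedText(ep);
        console.log(embed.data.length);
    }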
package/dist/models/mistral.js
CHANGED
@@ -5,39 +5,55 @@ var __decorate = (this && this.__decorate) || function (decorators, target, key,
      else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
      return c > 3 && r && Object.defineProperty(target, key, r), r;
  };
- var
-     return (mod && mod.__esModule) ? mod : { "default": mod };
- };
+ var MistralLLM_1;
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.
- const axios_1 = __importDefault(require("axios"));
+ exports.MistralChatParams = exports.MistralLLM = void 0;
  const ai_1 = require("@memberjunction/ai");
  const global_1 = require("@memberjunction/global");
-
+ const mistralClient_1 = require("./mistralClient");
+ let MistralLLM = MistralLLM_1 = class MistralLLM extends ai_1.BaseLLM {
      constructor(apiKey) {
          super(apiKey);
-
-
-
+         if (!MistralLLM_1._client) {
+             MistralLLM_1._client = new mistralClient_1.MistralClient({ apiKey });
+         }
      }
+     get client() { return MistralLLM_1._client; }
      async ChatCompletion(params) {
-         const
-
-
-
-
-
-
+         const startTime = new Date();
+         const chatResponse = await this.client.chat({
+             model: params.model,
+             messages: params.messages
+         });
+         const endTime = new Date();
+         let choices = chatResponse.choices.map((choice) => {
+             const res = {
+                 message: {
+                     role: 'assistant',
+                     content: choice.message.content
+                 },
+                 finish_reason: choice.finish_reason,
+                 index: choice.index
+             };
+             return res;
+         });
+         return {
+             success: true,
+             statusText: "OK",
+             startTime: startTime,
+             endTime: endTime,
+             timeElapsed: endTime.getTime() - startTime.getTime(),
              data: {
-
-
-
-
+                 choices: choices,
+                 usage: {
+                     totalTokens: chatResponse.usage.total_tokens,
+                     promptTokens: chatResponse.usage.prompt_tokens,
+                     completionTokens: chatResponse.usage.completion_tokens
+                 }
+             },
+             errorMessage: "",
+             exception: null,
          };
-         let result = await this.callApi(config);
-         console.log(result);
-         console.log(result.choices);
-         return result;
      }
      async SummarizeText(params) {
          throw new Error("Method not implemented.");
@@ -45,101 +61,33 @@ let MistralLLM = class MistralLLM extends ai_1.BaseLLM {
      async ClassifyText(params) {
          throw new Error("Method not implemented.");
      }
-
-
-      * @param prompt
-      */
-     async Ask(prompt) {
-         let response = await this.ChatSingle(prompt, undefined, exports.MistralModels.Tiny);
-         console.log(response);
-         response.choices.forEach((choice) => {
-             console.log(choice.message.content);
-         });
-     }
-     /**
-      * Returns a list of available models
-      * @returns {Promise<AvailableModelInfo>}
-      */
-     async ListModels() {
-         const request = this.createAxiosRequestConfig('get', 'models');
-         let response = await this.callApi(request);
+     async createEmbedding(model, text) {
+         const response = await this.client.embeddings(model, text);
          return response;
      }
-     async
-         const
-             {
-                 role: role,
-                 content: message
-             }
-         ];
-         return await this.Chat(chatMessage, model, temperature, maxTokens, topP, randomSeed, safePrompt);
-     }
-     async Chat(messages, model = exports.MistralModels.Medium, temperature = null, maxTokens = null, topP = null, randomSeed = null, safePrompt = this.enableSafePrompt) {
-         const request = this.MakeChatCompletionRequest(model, messages, temperature, maxTokens, topP, randomSeed, false, safePrompt);
-         const axiosRequest = this.createAxiosRequestConfig('post', "chat/completions", request);
-         return await this.callApi(axiosRequest);
-     }
-     MakeChatCompletionRequest(model, messages, temperature = null, maxTokens = null, topP = null, randomSeed = null, stream = null, safePrompt = null) {
-         return {
-             model: model,
-             messages: messages,
-             temperature: temperature ?? undefined,
-             max_tokens: maxTokens ?? undefined,
-             top_p: topP ?? undefined,
-             random_seed: randomSeed ?? undefined,
-             stream: stream ?? undefined,
-             safe_prompt: safePrompt ?? undefined,
-         };
-     }
-     ;
-     createAxiosRequestConfig(method, path, options = null) {
+     async EmbedText(params) {
+         const response = await this.client.embeddings(params.model, params.text);
          return {
-
-
-
-
-             Accept: options?.stream ? 'text/event-stream' : 'application/json',
-             ContentType: 'application/json',
-             Authorization: `Bearer ${this.apiKey}`
-         },
-         data: method !== 'get' ? options : null,
-         timeout: 120 * 1000
+             object: 'object',
+             model: params.model || "mistral-embed", //hard coded for now as theres only one available embedding model
+             ModelUsage: new ai_1.ModelUsage(response.usage.prompt_tokens, response.usage.completion_tokens),
+             data: response.data[0].embedding
          };
      }
-
-
-
-
-
-
-
-             console.error("An error occured when making request to", data.baseURL + data.url, ":\n", error.data?.message);
-             return null;
-         }
-     }
-     ;
-     createChatMessages(prompts, role = 'user') {
-         let messages = [];
-         prompts.forEach((prompt) => {
-             messages.push({
-                 role: role,
-                 content: prompt
-             });
-         });
-         return messages;
+     /**
+      * Returns a list of available models
+      * @returns {Promise<AvailableModelInfo>}
+      */
+     async listModels() {
+         const listModelsResponse = await this.client.listModels();
+         return listModelsResponse;
      }
  };
  exports.MistralLLM = MistralLLM;
- exports.MistralLLM = MistralLLM = __decorate([
+ exports.MistralLLM = MistralLLM = MistralLLM_1 = __decorate([
      (0, global_1.RegisterClass)(ai_1.BaseLLM, "MistralLLM")
  ], MistralLLM);
-
-
-
- };
- exports.MistralModels = {
-     Tiny: "mistral-tiny",
-     Small: "mistral-small",
-     Medium: "mistral-medium"
- };
+ class MistralChatParams extends ai_1.ChatParams {
+ }
+ exports.MistralChatParams = MistralChatParams;
  //# sourceMappingURL=mistral.js.map
package/dist/models/mistral.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"mistral.js","sourceRoot":"","sources":["../../src/models/mistral.ts"],"names":[],"mappings":"
+
{"version":3,"file":"mistral.js","sourceRoot":"","sources":["../../src/models/mistral.ts"],"names":[],"mappings":";;;;;;;;;;AAAA,2CAA+L;AAC/L,mDAAuD;AACvD,mDAAqH;AAG9G,IAAM,UAAU,kBAAhB,MAAM,UAAW,SAAQ,YAAO;IAEnC,YAAY,MAAc;QACtB,KAAK,CAAC,MAAM,CAAC,CAAC;QACd,IAAI,CAAC,YAAU,CAAC,OAAO,EAAC,CAAC;YACrB,YAAU,CAAC,OAAO,GAAG,IAAI,6BAAa,CAAC,EAAE,MAAM,EAAE,CAAC,CAAC;QACvD,CAAC;IACL,CAAC;IAED,IAAW,MAAM,KAAmB,OAAO,YAAU,CAAC,OAAO,CAAC,CAAA,CAAC;IAExD,KAAK,CAAC,cAAc,CAAC,MAAyB;QACjD,MAAM,SAAS,GAAG,IAAI,IAAI,EAAE,CAAC;QAC7B,MAAM,YAAY,GAAG,MAAM,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC;YACxC,KAAK,EAAE,MAAM,CAAC,KAAK;YACnB,QAAQ,EAAE,MAAM,CAAC,QAAQ;SAC5B,CAAC,CAAC;QACH,MAAM,OAAO,GAAG,IAAI,IAAI,EAAE,CAAC;QAE3B,IAAI,OAAO,GAAuB,YAAY,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,MAAoC,EAAE,EAAE;YAChG,MAAM,GAAG,GAAqB;gBAC1B,OAAO,EAAE;oBACL,IAAI,EAAE,WAAW;oBACjB,OAAO,EAAE,MAAM,CAAC,OAAO,CAAC,OAAO;iBAClC;gBACD,aAAa,EAAE,MAAM,CAAC,aAAa;gBACnC,KAAK,EAAE,MAAM,CAAC,KAAK;aACtB,CAAC;YACF,OAAO,GAAG,CAAC;QACf,CAAC,CAAC,CAAC;QAEH,OAAO;YACH,OAAO,EAAE,IAAI;YACb,UAAU,EAAE,IAAI;YAChB,SAAS,EAAE,SAAS;YACpB,OAAO,EAAE,OAAO;YAChB,WAAW,EAAE,OAAO,CAAC,OAAO,EAAE,GAAG,SAAS,CAAC,OAAO,EAAE;YACpD,IAAI,EAAE;gBACF,OAAO,EAAE,OAAO;gBAChB,KAAK,EAAE;oBACH,WAAW,EAAE,YAAY,CAAC,KAAK,CAAC,YAAY;oBAC5C,YAAY,EAAE,YAAY,CAAC,KAAK,CAAC,aAAa;oBAC9C,gBAAgB,EAAE,YAAY,CAAC,KAAK,CAAC,iBAAiB;iBACzD;aACJ;YACD,YAAY,EAAE,EAAE;YAChB,SAAS,EAAE,IAAI;SAClB,CAAA;IAEL,CAAC;IAEM,KAAK,CAAC,aAAa,CAAC,MAAuB;QAC9C,MAAM,IAAI,KAAK,CAAC,yBAAyB,CAAC,CAAC;IAC/C,CAAC;IAEM,KAAK,CAAC,YAAY,CAAC,MAAsB;QAC5C,MAAM,IAAI,KAAK,CAAC,yBAAyB,CAAC,CAAC;IAC/C,CAAC;IAEM,KAAK,CAAC,eAAe,CAAC,KAAa,EAAE,IAAY;QACpD,MAAM,QAAQ,GAAsB,MAAM,IAAI,CAAC,MAAM,CAAC,UAAU,CAAC,KAAK,EAAE,IAAI,CAAC,CAAC;QAC9E,OAAO,QAAQ,CAAC;IACpB,CAAC;IAEM,KAAK,CAAC,SAAS,CAAC,MAAmB;QACtC,MAAM,QAAQ,GAAsB,MAAM,IAAI,CAAC,MAAM,CAAC,UAAU,CAAC,MAAM,CAAC,KAAK,EAAE,MAAM,CAAC,IAAI,CAAC,CAAC;QAC5F,OAAO;YACH,MAAM,EAAE,QAAQ;YAChB,KAAK,EAAE,MAAM,CAAC,KAAK,IAAI,eAAe,EAAE,iEAAiE;YACzG,UAAU,EAAE,IAAI,eAAU,CAAC,QAAQ,CAAC,KAAK,CAAC,aAAa,EAAE,QAAQ,CAAC,KAAK,CAAC,iBAAiB,CAAC;YAC1F,IAAI,EAAE,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,SAAS;SACnC,CAAA;IACL,CAAC;IAED;;;OAGG;IACI,KAAK,CAAC,UAAU;QACnB,MAAM,kBAAkB,GAAuB,MAAM,IAAI,CAAC,MAAM,CAAC,UAAU,EAAE,CAAC;QAC9E,OAAO,kBAAkB,CAAC;IAC9B,CAAC;CACJ,CAAA;AAlFY,gCAAU;qBAAV,UAAU;IADtB,IAAA,sBAAa,EAAC,YAAO,EAAE,YAAY,CAAC;GACxB,UAAU,CAkFtB;AAED,MAAa,iBAAkB,SAAQ,eAAU;CAEhD;AAFD,8CAEC"}
package/dist/models/mistralClient.d.ts
ADDED
@@ -0,0 +1,181 @@
+ import { ChatMessage } from '@memberjunction/ai';
+ /**
+  * A simple and lightweight client for the Mistral API
+  * @param {*} apiKey can be set as an environment variable MISTRAL_API_KEY,
+  * or provided in this parameter
+  * @param {*} endpoint defaults to https://api.mistral.ai
+  */
+ export declare class MistralClient {
+     private RETRY_STATUS_CODES;
+     private ENDPOINT;
+     endpoint: string;
+     apiKey: string;
+     textDecoder: TextDecoder;
+     constructor(config: {
+         apiKey?: string;
+         endpoint?: string;
+     });
+     /**
+      *
+      * @param {*} method
+      * @param {*} path
+      * @param {*} request
+      * @return {Promise<T>}
+      */
+     private request;
+     /**
+      * Creates a chat completion request
+      * @param {*} model
+      * @param {*} messages
+      * @param {*} temperature
+      * @param {*} maxTokens
+      * @param {*} topP
+      * @param {*} randomSeed
+      * @param {*} stream
+      * @param {*} safeMode
+      * @return {Promise<Object>}
+      */
+     _makeChatCompletionRequest: (model: any, messages: any, temperature: any, maxTokens: any, topP: any, randomSeed: any, stream: any, safeMode: any) => {
+         model: any;
+         messages: any;
+         temperature: any;
+         max_tokens: any;
+         top_p: any;
+         random_seed: any;
+         stream: any;
+         safe_prompt: any;
+     };
+     /**
+      * Returns a list of the available models
+      * @return {Promise<ListModelsResponse>}
+      */
+     listModels(): Promise<ListModelsResponse>;
+     /**
+      * A chat endpoint without streaming
+      * @param {*} model the name of the model to chat with, e.g. mistral-tiny
+      * @param {*} messages an array of messages to chat with, e.g.
+      * [{role: 'user', content: 'What is the best French cheese?'}]
+      * @param {*} temperature the temperature to use for sampling, e.g. 0.5
+      * @param {*} maxTokens the maximum number of tokens to generate, e.g. 100
+      * @param {*} topP the cumulative probability of tokens to generate, e.g. 0.9
+      * @param {*} randomSeed the random seed to use for sampling, e.g. 42
+      * @param {*} safeMode whether to use safe mode, e.g. true
+      * @return {Promise<Object>}
+      */
+     chat(params: ChatCompletetionRequest): Promise<ChatCompletionResponse>;
+     /**
+      * A chat endpoint that streams responses.
+      * @param {*} model the name of the model to chat with, e.g. mistral-tiny
+      * @param {*} messages an array of messages to chat with, e.g.
+      * [{role: 'user', content: 'What is the best French cheese?'}]
+      * @param {*} temperature the temperature to use for sampling, e.g. 0.5
+      * @param {*} maxTokens the maximum number of tokens to generate, e.g. 100
+      * @param {*} topP the cumulative probability of tokens to generate, e.g. 0.9
+      * @param {*} randomSeed the random seed to use for sampling, e.g. 42
+      * @param {*} safeMode whether to use safe mode, e.g. true
+      * @return {Promise<Object>}
+      */
+     chatStream: ({ model, messages, temperature, maxTokens, topP, randomSeed, safeMode }: {
+         model: any;
+         messages: any;
+         temperature: any;
+         maxTokens: any;
+         topP: any;
+         randomSeed: any;
+         safeMode: any;
+     }) => AsyncGenerator<any, void, unknown>;
+     /**
+      * An embedddings endpoint that returns embeddings for a single,
+      * or batch of inputs
+      * @param {*} model The embedding model to use, e.g. mistral-embed
+      * @param {*} input The input to embed,
+      * e.g. ['What is the best French cheese?']
+      * @return {Promise<Object>}
+      */
+     embeddings(model: string, input: string): Promise<EmbeddingResponse>;
+ }
+ export interface ModelPermission {
+     id: string;
+     object: 'model_permission';
+     created: number;
+     allow_create_engine: boolean;
+     allow_sampling: boolean;
+     allow_logprobs: boolean;
+     allow_search_indices: boolean;
+     allow_view: boolean;
+     allow_fine_tuning: boolean;
+     organization: string;
+     group: string | null;
+     is_blocking: boolean;
+ }
+ export interface Model {
+     id: string;
+     object: 'model';
+     created: number;
+     owned_by: string;
+     root: string | null;
+     parent: string | null;
+     permission: ModelPermission[];
+ }
+ export interface ListModelsResponse {
+     object: 'list';
+     data: Model[];
+ }
+ export interface TokenUsage {
+     prompt_tokens: number;
+     completion_tokens: number;
+     total_tokens: number;
+ }
+ export type ChatCompletetionRequest = {
+     model: string;
+     messages: ChatMessage[];
+     temperature?: number;
+     max_tokens?: number;
+     top_p?: number;
+     random_seed?: number;
+     stream?: boolean;
+     safe_prompt?: boolean;
+ };
+ export interface ChatCompletionResponseChoice {
+     index: number;
+     message: {
+         role: string;
+         content: string;
+     };
+     finish_reason: string;
+ }
+ export interface ChatCompletionResponseChunkChoice {
+     index: number;
+     delta: {
+         role?: string;
+         content?: string;
+     };
+     finish_reason: string;
+ }
+ export interface ChatCompletionResponse {
+     id: string;
+     object: 'chat.completion';
+     created: number;
+     model: string;
+     choices: ChatCompletionResponseChoice[];
+     usage: TokenUsage;
+ }
+ export interface ChatCompletionResponseChunk {
+     id: string;
+     object: 'chat.completion.chunk';
+     created: number;
+     model: string;
+     choices: ChatCompletionResponseChunkChoice[];
+ }
+ export interface Embedding {
+     id: string;
+     object: 'embedding';
+     embedding: number[];
+ }
+ export interface EmbeddingResponse {
+     id: string;
+     object: 'list';
+     data: Embedding[];
+     model: string;
+     usage: TokenUsage;
+ }
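MistralClient mirrors the upstream mistralai/client-js surface (listModels, chat, chatStream, embeddings) on top of axios. A small sketch of calling it directly (illustrative only; the import path, model names, and prompts are assumptions, not part of this diff):

    import { MistralClient } from './models/mistralClient'; // hypothetical relative import

    async function clientDemo(): Promise<void> {
        // apiKey falls back to process.env.MISTRAL_API_KEY; endpoint defaults to https://api.mistral.ai
        const client = new MistralClient({ apiKey: process.env.MISTRAL_API_KEY });

        const models = await client.listModels();
        console.log(models.data.map((m) => m.id));

        const chat = await client.chat({
            model: 'mistral-tiny',                           // assumed model name
            messages: [{ role: 'user', content: 'What is the best French cheese?' }],
            temperature: 0.5,
        });
        console.log(chat.choices[0].message.content);

        const embeddings = await client.embeddings('mistral-embed', 'What is the best French cheese?');
        console.log(embeddings.data[0].embedding.length);
    }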
package/dist/models/mistralClient.js
ADDED
@@ -0,0 +1,155 @@
+ "use strict";
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.MistralClient = void 0;
+ const axios_1 = __importDefault(require("axios"));
+ const axios_retry_1 = __importDefault(require("axios-retry"));
+ //This is a 1:1 copy of the mistralAI client library
+ //but modified to work with MJ
+ //see https://github.com/mistralai/client-js
+ /**
+  * A simple and lightweight client for the Mistral API
+  * @param {*} apiKey can be set as an environment variable MISTRAL_API_KEY,
+  * or provided in this parameter
+  * @param {*} endpoint defaults to https://api.mistral.ai
+  */
+ class MistralClient {
+     constructor(config) {
+         this.RETRY_STATUS_CODES = [429, 500, 502, 503, 504];
+         this.ENDPOINT = 'https://api.mistral.ai';
+         /**
+          * Creates a chat completion request
+          * @param {*} model
+          * @param {*} messages
+          * @param {*} temperature
+          * @param {*} maxTokens
+          * @param {*} topP
+          * @param {*} randomSeed
+          * @param {*} stream
+          * @param {*} safeMode
+          * @return {Promise<Object>}
+          */
+         this._makeChatCompletionRequest = function (model, messages, temperature, maxTokens, topP, randomSeed, stream, safeMode) {
+             return {
+                 model: model,
+                 messages: messages,
+                 temperature: temperature ?? undefined,
+                 max_tokens: maxTokens ?? undefined,
+                 top_p: topP ?? undefined,
+                 random_seed: randomSeed ?? undefined,
+                 stream: stream ?? undefined,
+                 safe_prompt: safeMode ?? undefined,
+             };
+         };
+         /**
+          * A chat endpoint that streams responses.
+          * @param {*} model the name of the model to chat with, e.g. mistral-tiny
+          * @param {*} messages an array of messages to chat with, e.g.
+          * [{role: 'user', content: 'What is the best French cheese?'}]
+          * @param {*} temperature the temperature to use for sampling, e.g. 0.5
+          * @param {*} maxTokens the maximum number of tokens to generate, e.g. 100
+          * @param {*} topP the cumulative probability of tokens to generate, e.g. 0.9
+          * @param {*} randomSeed the random seed to use for sampling, e.g. 42
+          * @param {*} safeMode whether to use safe mode, e.g. true
+          * @return {Promise<Object>}
+          */
+         this.chatStream = async function* ({ model, messages, temperature, maxTokens, topP, randomSeed, safeMode }) {
+             const request = this._makeChatCompletionRequest(model, messages, temperature, maxTokens, topP, randomSeed, true, safeMode);
+             const response = await this._request('post', 'v1/chat/completions', request);
+             for await (const chunk of response) {
+                 const chunkString = this.textDecoder.decode(chunk);
+                 // split the chunks by new line
+                 const chunkLines = chunkString.split('\n');
+                 // Iterate through the lines
+                 for (const chunkLine of chunkLines) {
+                     // If the line starts with data: then it is a chunk
+                     if (chunkLine.startsWith('data:')) {
+                         const chunkData = chunkLine.substring(6).trim();
+                         if (chunkData !== '[DONE]') {
+                             yield JSON.parse(chunkData);
+                         }
+                     }
+                 }
+             }
+         };
+         this.endpoint = config.endpoint || this.ENDPOINT;
+         this.apiKey = config.apiKey || process.env.MISTRAL_API_KEY;
+         this.textDecoder = new TextDecoder();
+         (0, axios_retry_1.default)(axios_1.default, {
+             retries: 3,
+             retryCondition: (error) => {
+                 return this.RETRY_STATUS_CODES.includes(error.response.status);
+             },
+             retryDelay: (retryCount, error) => {
+                 console.debug(`retry attempt: ${retryCount}`, error);
+                 return retryCount * 500;
+             },
+         });
+     }
+     /**
+      *
+      * @param {*} method
+      * @param {*} path
+      * @param {*} request
+      * @return {Promise<T>}
+      */
+     async request(method, path, request) {
+         const response = await (0, axios_1.default)({
+             method: method,
+             url: `${this.endpoint}/${path}`,
+             data: request || {},
+             headers: {
+                 'Authorization': `Bearer ${this.apiKey}`,
+             },
+             responseType: request?.stream ? 'stream' : 'json',
+         }).catch((error) => {
+             console.error(error);
+             return error.response;
+         });
+         return response.data;
+     }
+     /**
+      * Returns a list of the available models
+      * @return {Promise<ListModelsResponse>}
+      */
+     async listModels() {
+         const response = await this.request('get', 'v1/models');
+         return response;
+     }
+     /**
+      * A chat endpoint without streaming
+      * @param {*} model the name of the model to chat with, e.g. mistral-tiny
+      * @param {*} messages an array of messages to chat with, e.g.
+      * [{role: 'user', content: 'What is the best French cheese?'}]
+      * @param {*} temperature the temperature to use for sampling, e.g. 0.5
+      * @param {*} maxTokens the maximum number of tokens to generate, e.g. 100
+      * @param {*} topP the cumulative probability of tokens to generate, e.g. 0.9
+      * @param {*} randomSeed the random seed to use for sampling, e.g. 42
+      * @param {*} safeMode whether to use safe mode, e.g. true
+      * @return {Promise<Object>}
+      */
+     async chat(params) {
+         const response = await this.request('post', 'v1/chat/completions', params);
+         return response;
+     }
+     /**
+      * An embedddings endpoint that returns embeddings for a single,
+      * or batch of inputs
+      * @param {*} model The embedding model to use, e.g. mistral-embed
+      * @param {*} input The input to embed,
+      * e.g. ['What is the best French cheese?']
+      * @return {Promise<Object>}
+      */
+     async embeddings(model, input) {
+         const request = {
+             model: model,
+             input: input,
+         };
+         const response = await this.request('post', 'v1/embeddings', request);
+         return response;
+     }
+ }
+ exports.MistralClient = MistralClient;
+ //# sourceMappingURL=mistralClient.js.map
package/dist/models/mistralClient.js.map
ADDED
@@ -0,0 +1 @@
+
{"version":3,"file":"mistralClient.js","sourceRoot":"","sources":["../../src/models/mistralClient.ts"],"names":[],"mappings":";;;;;;AACA,kDAA0B;AAC1B,8DAAqC;AAErC,oDAAoD;AACpD,8BAA8B;AAC9B,6CAA6C;AAE7C;;;;;GAKG;AACH,MAAa,aAAa;IAStB,YAAY,MAA4C;QAPhD,uBAAkB,GAAG,CAAC,GAAG,EAAE,GAAG,EAAE,GAAG,EAAE,GAAG,EAAE,GAAG,CAAC,CAAC;QAC/C,aAAQ,GAAG,wBAAwB,CAAC;QAiD9C;;;;;;;;;;;WAWG;QACH,+BAA0B,GAAG,UAC3B,KAAK,EACL,QAAQ,EACR,WAAW,EACX,SAAS,EACT,IAAI,EACJ,UAAU,EACV,MAAM,EACN,QAAQ;YAER,OAAO;gBACL,KAAK,EAAE,KAAK;gBACZ,QAAQ,EAAE,QAAQ;gBAClB,WAAW,EAAE,WAAW,IAAI,SAAS;gBACrC,UAAU,EAAE,SAAS,IAAI,SAAS;gBAClC,KAAK,EAAE,IAAI,IAAI,SAAS;gBACxB,WAAW,EAAE,UAAU,IAAI,SAAS;gBACpC,MAAM,EAAE,MAAM,IAAI,SAAS;gBAC3B,WAAW,EAAE,QAAQ,IAAI,SAAS;aACnC,CAAC;QACJ,CAAC,CAAC;QAgCF;;;;;;;;;;;WAWG;QACH,eAAU,GAAG,KAAK,SAAS,CAAC,EAAE,EAC5B,KAAK,EACL,QAAQ,EACR,WAAW,EACX,SAAS,EACT,IAAI,EACJ,UAAU,EACV,QAAQ,EAAC;YACT,MAAM,OAAO,GAAG,IAAI,CAAC,0BAA0B,CAC7C,KAAK,EACL,QAAQ,EACR,WAAW,EACX,SAAS,EACT,IAAI,EACJ,UAAU,EACV,IAAI,EACJ,QAAQ,CACT,CAAC;YACF,MAAM,QAAQ,GAAG,MAAM,IAAI,CAAC,QAAQ,CAClC,MAAM,EAAE,qBAAqB,EAAE,OAAO,CACvC,CAAC;YAEF,IAAI,KAAK,EAAE,MAAM,KAAK,IAAI,QAAQ,EAAE,CAAC;gBACnC,MAAM,WAAW,GAAG,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;gBACnD,+BAA+B;gBAC/B,MAAM,UAAU,GAAG,WAAW,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC;gBAC3C,4BAA4B;gBAC5B,KAAK,MAAM,SAAS,IAAI,UAAU,EAAE,CAAC;oBACnC,mDAAmD;oBACnD,IAAI,SAAS,CAAC,UAAU,CAAC,OAAO,CAAC,EAAE,CAAC;wBAClC,MAAM,SAAS,GAAG,SAAS,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE,CAAC;wBAChD,IAAI,SAAS,KAAK,QAAQ,EAAE,CAAC;4BAC3B,MAAM,IAAI,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC;wBAC9B,CAAC;oBACH,CAAC;gBACH,CAAC;YACH,CAAC;QACH,CAAC,CAAC;QA3JI,IAAI,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,IAAI,IAAI,CAAC,QAAQ,CAAC;QACjD,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC,MAAM,IAAI,OAAO,CAAC,GAAG,CAAC,eAAe,CAAC;QAE3D,IAAI,CAAC,WAAW,GAAG,IAAI,WAAW,EAAE,CAAC;QAErC,IAAA,qBAAU,EAAC,eAAK,EAAE;YAClB,OAAO,EAAE,CAAC;YACV,cAAc,EAAE,CAAC,KAAK,EAAE,EAAE;gBACtB,OAAO,IAAI,CAAC,kBAAkB,CAAC,QAAQ,CAAC,KAAK,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC;YACnE,CAAC;YAED,UAAU,EAAE,CAAC,UAAU,EAAE,KAAK,EAAE,EAAE;gBAC9B,OAAO,CAAC,KAAK,CAAC,kBAAkB,UAAU,EAAE,EAAE,KAAK,CAAC,CAAC;gBACrD,OAAO,UAAU,GAAG,GAAG,CAAC;YAC5B,CAAC;SACA,CAAC,CAAC;IACP,CAAC;IAEH;;;;;;OAMG;IACK,KAAK,CAAC,OAAO,CAAI,MAAc,EAAE,IAAY,EAAE,OAAa;QAElE,MAAM,QAAQ,GAAG,MAAM,IAAA,eAAK,EAAC;YACzB,MAAM,EAAE,MAAM;YACd,GAAG,EAAE,GAAG,IAAI,CAAC,QAAQ,IAAI,IAAI,EAAE;YAC/B,IAAI,EAAE,OAAO,IAAI,EAAE;YACnB,OAAO,EAAE;gBACP,eAAe,EAAE,UAAU,IAAI,CAAC,MAAM,EAAE;aACzC;YACD,YAAY,EAAE,OAAO,EAAE,MAAM,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM;SAClD,CAAC,CAAC,KAAK,CAAC,CAAC,KAAK,EAAE,EAAE;YACjB,OAAO,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC;YACrB,OAAO,KAAK,CAAC,QAAQ,CAAC;QACxB,CAAC,CAAC,CAAC;QACH,OAAO,QAAQ,CAAC,IAAI,CAAC;IACzB,CAAC;IAqCD;;;OAGG;IACI,KAAK,CAAC,UAAU;QACrB,MAAM,QAAQ,GAAuB,MAAM,IAAI,CAAC,OAAO,CAAC,KAAK,EAAE,WAAW,CAAC,CAAC;QAC5E,OAAO,QAAQ,CAAC;IAClB,CAAC;IAED;;;;;;;;;;;OAWG;IACI,KAAK,CAAC,IAAI,CAAC,MAA+B;QAC/C,MAAM,QAAQ,GAA2B,MAAM,IAAI,CAAC,OAAO,CACzD,MAAM,EAAE,qBAAqB,EAAE,MAAM,CACtC,CAAC;QAEF,OAAO,QAAQ,CAAC;IAClB,CAAC;IAqDD;;;;;;;OAOG;IACI,KAAK,CAAC,UAAU,CAAC,KAAa,EAAE,KAAa;QAClD,MAAM,OAAO,GAAG;YACZ,KAAK,EAAE,KAAK;YACZ,KAAK,EAAE,KAAK;SACb,CAAC;QACF,MAAM,QAAQ,GAAsB,MAAM,IAAI,CAAC,OAAO,CACpD,MAAM,EAAE,eAAe,EAAE,OAAO,CACjC,CAAC;QACF,OAAO,QAAQ,CAAC;IACpB,CAAC;CACF;AAzLD,sCAyLC"}
package/package.json
CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "@memberjunction/ai-mistral",
-     "version": "0.9.
+     "version": "0.9.14",
      "description": "MemberJunction Wrapper for Mistral AI's AI Models",
      "main": "dist/index.js",
      "types": "dist/index.d.ts",
@@ -16,11 +16,10 @@
      "license": "ISC",
      "devDependencies": {
          "ts-node-dev": "^2.0.0",
-         "typescript": "^5.3.3"
-         "openai": "^3.2.1"
+         "typescript": "^5.3.3"
      },
      "dependencies": {
-         "@memberjunction/ai": "^0.9.
-         "@memberjunction/global": "^0.9.
+         "@memberjunction/ai": "^0.9.159",
+         "@memberjunction/global": "^0.9.152"
      }
  }