openlayer 0.1.34 → 0.1.35
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/cli/cliHandler.d.ts +6 -0
- package/dist/cli/cliHandler.js +76 -0
- package/dist/examples/openai_dev_mode.d.ts +10 -0
- package/dist/examples/openai_dev_mode.js +55 -0
- package/dist/lib/cli/cliHandler.d.ts +6 -0
- package/dist/lib/cli/cliHandler.js +76 -0
- package/dist/lib/index.d.ts +236 -0
- package/dist/lib/index.js +608 -0
- package/dist/lib/utils/request.d.ts +5 -0
- package/dist/lib/utils/request.js +34 -0
- package/dist/lib/utils/run.d.ts +13 -0
- package/dist/lib/utils/run.js +2 -0
- package/dist/utils/run.d.ts +13 -0
- package/dist/utils/run.js +2 -0
- package/examples/openai_dev_mode.ts +58 -0
- package/package.json +1 -1
- package/static/logo-purple-text.svg +14 -0
- package/tsconfig.json +1 -0
- package/static/logo.png +0 -0
package/README.md
CHANGED
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
"use strict";
/*
 * Description: This file contains the CLIHandler class which is responsible
 * For handling the CLI input and output.
 *
 * Example Usage:
 * // Initialize CLI handler with the user's model run method
 * const cliHandler = new CLIHandler(model.run.bind(model));
 *
 * // Setup CLI and process dataset
 * cliHandler.runFromCLI();
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.CLIHandler = void 0;
const commander_1 = require("commander");
const fs = require("fs");
const path = require("path");
/**
 * Drives a user-supplied model over a JSON dataset from the command line.
 *
 * The run function is expected to resolve to an object of the shape
 * `{ output, otherFields }` for each dataset row (see the example in
 * examples/openai_dev_mode).
 */
class CLIHandler {
    /**
     * @param {(row: object) => Promise<{output: any, otherFields?: object}>} runFunction
     *   The model's run method, already bound to its instance.
     */
    constructor(runFunction) {
        this.run = runFunction;
    }
    /**
     * Parses `--dataset-path` and `--output-dir`, runs the model on every
     * dataset row in parallel, and writes the merged results to the output
     * directory. Sets a non-zero exit code on failure so CI callers can
     * detect errors instead of silently exiting 0.
     */
    runFromCLI() {
        commander_1.program
            .requiredOption('--dataset-path <path>', 'Path to the dataset')
            .requiredOption('--output-dir <path>', 'Directory to place results');
        commander_1.program.parse(process.argv);
        const options = commander_1.program.opts();
        const { datasetPath, outputDir } = options;
        // Load dataset — fail fast with a clear message rather than an
        // unhandled exception and raw stack trace on a bad path / bad JSON.
        let dataset;
        try {
            const datasetFullPath = path.resolve(datasetPath);
            const rawData = fs.readFileSync(datasetFullPath, 'utf8');
            dataset = JSON.parse(rawData);
        }
        catch (err) {
            console.error(`Error loading dataset from ${datasetPath}: ${err}`);
            process.exitCode = 1;
            return;
        }
        // Guard against a valid-JSON-but-not-an-array file, which would
        // otherwise surface as a confusing "dataset.map is not a function".
        if (!Array.isArray(dataset)) {
            console.error('Error processing dataset: dataset file must contain a JSON array of rows');
            process.exitCode = 1;
            return;
        }
        // Process each item in the dataset dynamically (all rows in parallel).
        Promise.all(dataset.map((item) => Promise.resolve(this.run(item)).then((result) =>
        // Merge the original item fields with the result
        Object.assign({}, item, result.otherFields, { output: result.output }))))
            .then((results) => {
            /*
             * Wait for all rows to be run
             * Write results now to output dir or log to console
             */
            this.writeOutput(results, outputDir);
            console.log('Results processing completed. Check console for output.');
        })
            .catch((err) => {
            console.error(`Error processing dataset: ${err}`);
            process.exitCode = 1;
        });
    }
    /**
     * Writes `dataset.json` (the merged rows) and `config.json` (column
     * metadata consumed by Openlayer) into the resolved output directory,
     * creating it if necessary.
     * @param {object[]} results - One merged record per dataset row.
     * @param {string} outputDir - Destination directory (created recursively).
     */
    writeOutput(results, outputDir) {
        const config = {
            metadata: { outputTimestamp: Date.now() },
            outputColumnName: 'output',
        };
        // Construct an output directory {outputDir}/{datasetName}/
        const outputDirPath = path.resolve(outputDir);
        fs.mkdirSync(outputDirPath, { recursive: true });
        const datasetPath = path.join(outputDirPath, 'dataset.json');
        const configPath = path.join(outputDirPath, 'config.json');
        fs.writeFileSync(datasetPath, JSON.stringify(results, null, 4), 'utf8');
        fs.writeFileSync(configPath, JSON.stringify(config, null, 4), 'utf8');
        console.log(`Output written to ${datasetPath}`);
        console.log(`Config written to ${configPath}`);
    }
}
exports.CLIHandler = CLIHandler;
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
"use strict";
/*
 * This shows how to use the OpenAI monitor with Openlayer to create a CLI handler
 * for processing datasets. The script can be called with the following command:
 * node dist/run.js --dataset-path {{ path }} --dataset-name {{ name }}
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.MyModel = void 0;
const openlayer_1 = require("openlayer");
const cliHandler_1 = require("openlayer/cli/cliHandler");
/**
 * Example model: forwards a user query to OpenAI through the Openlayer
 * monitor so every completion is logged to the configured project.
 */
class MyModel {
    constructor() {
        // Credentials and project name come from the environment; empty
        // strings are used as fallbacks when the variables are unset.
        this.openaiApiKey = process.env.OPENAI_API_KEY || '';
        this.openlayerApiKey = process.env.OPENLAYER_API_KEY || '';
        const openlayerProjectName = process.env.OPENLAYER_PROJECT_NAME || '';
        this.monitor = new openlayer_1.OpenAIMonitor({
            openAiApiKey: this.openaiApiKey,
            openlayerApiKey: this.openlayerApiKey,
            openlayerProjectName,
        });
    }
    /**
     * Runs one dataset row through the monitored chat-completion endpoint.
     * @param {{userQuery: string}} row - The dataset row; only `userQuery` is read.
     * @returns {Promise<{otherFields: {model: string}, output: any}>}
     */
    async run({ userQuery }) {
        // Implement the model run logic here
        const model = 'gpt-3.5-turbo';
        const request = {
            messages: [
                {
                    content: userQuery,
                    role: 'user',
                },
            ],
            model,
        };
        const response = await this.monitor.createChatCompletion(request, undefined);
        const answer = response.choices[0].message.content;
        return { otherFields: { model }, output: answer };
    }
}
exports.MyModel = MyModel;
// User implements their model
const model = new MyModel();
// Initialize CLI handler with the user's model run method
const cliHandler = new cliHandler_1.CLIHandler(model.run.bind(model));
// Setup CLI and process dataset
cliHandler.runFromCLI();
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
"use strict";
/*
 * Description: This file contains the CLIHandler class which is responsible
 * For handling the CLI input and output.
 *
 * Example Usage:
 * // Initialize CLI handler with the user's model run method
 * const cliHandler = new CLIHandler(model.run.bind(model));
 *
 * // Setup CLI and process dataset
 * cliHandler.runFromCLI();
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.CLIHandler = void 0;
const commander_1 = require("commander");
const fs = require("fs");
const path = require("path");
/**
 * Drives a user-supplied model over a JSON dataset from the command line.
 *
 * The run function is expected to resolve to an object of the shape
 * `{ output, otherFields }` for each dataset row (see the example in
 * examples/openai_dev_mode).
 */
class CLIHandler {
    /**
     * @param {(row: object) => Promise<{output: any, otherFields?: object}>} runFunction
     *   The model's run method, already bound to its instance.
     */
    constructor(runFunction) {
        this.run = runFunction;
    }
    /**
     * Parses `--dataset-path` and `--output-dir`, runs the model on every
     * dataset row in parallel, and writes the merged results to the output
     * directory. Sets a non-zero exit code on failure so CI callers can
     * detect errors instead of silently exiting 0.
     */
    runFromCLI() {
        commander_1.program
            .requiredOption('--dataset-path <path>', 'Path to the dataset')
            .requiredOption('--output-dir <path>', 'Directory to place results');
        commander_1.program.parse(process.argv);
        const options = commander_1.program.opts();
        const { datasetPath, outputDir } = options;
        // Load dataset — fail fast with a clear message rather than an
        // unhandled exception and raw stack trace on a bad path / bad JSON.
        let dataset;
        try {
            const datasetFullPath = path.resolve(datasetPath);
            const rawData = fs.readFileSync(datasetFullPath, 'utf8');
            dataset = JSON.parse(rawData);
        }
        catch (err) {
            console.error(`Error loading dataset from ${datasetPath}: ${err}`);
            process.exitCode = 1;
            return;
        }
        // Guard against a valid-JSON-but-not-an-array file, which would
        // otherwise surface as a confusing "dataset.map is not a function".
        if (!Array.isArray(dataset)) {
            console.error('Error processing dataset: dataset file must contain a JSON array of rows');
            process.exitCode = 1;
            return;
        }
        // Process each item in the dataset dynamically (all rows in parallel).
        Promise.all(dataset.map((item) => Promise.resolve(this.run(item)).then((result) =>
        // Merge the original item fields with the result
        Object.assign({}, item, result.otherFields, { output: result.output }))))
            .then((results) => {
            /*
             * Wait for all rows to be run
             * Write results now to output dir or log to console
             */
            this.writeOutput(results, outputDir);
            console.log('Results processing completed. Check console for output.');
        })
            .catch((err) => {
            console.error(`Error processing dataset: ${err}`);
            process.exitCode = 1;
        });
    }
    /**
     * Writes `dataset.json` (the merged rows) and `config.json` (column
     * metadata consumed by Openlayer) into the resolved output directory,
     * creating it if necessary.
     * @param {object[]} results - One merged record per dataset row.
     * @param {string} outputDir - Destination directory (created recursively).
     */
    writeOutput(results, outputDir) {
        const config = {
            metadata: { outputTimestamp: Date.now() },
            outputColumnName: 'output',
        };
        // Construct an output directory {outputDir}/{datasetName}/
        const outputDirPath = path.resolve(outputDir);
        fs.mkdirSync(outputDirPath, { recursive: true });
        const datasetPath = path.join(outputDirPath, 'dataset.json');
        const configPath = path.join(outputDirPath, 'config.json');
        fs.writeFileSync(datasetPath, JSON.stringify(results, null, 4), 'utf8');
        fs.writeFileSync(configPath, JSON.stringify(config, null, 4), 'utf8');
        console.log(`Output written to ${datasetPath}`);
        console.log(`Config written to ${configPath}`);
    }
}
exports.CLIHandler = CLIHandler;
|
|
@@ -0,0 +1,236 @@
|
|
|
1
|
+
import { RequestOptions } from 'openai/core';
|
|
2
|
+
import { ChatCompletion, ChatCompletionChunk, ChatCompletionCreateParams, ChatCompletionMessageParam, Completion, CompletionCreateParams } from 'openai/resources';
|
|
3
|
+
import { Run } from 'openai/resources/beta/threads/runs/runs';
|
|
4
|
+
import { Stream } from 'openai/streaming';
|
|
5
|
+
/**
|
|
6
|
+
* Represents the data structure for a chat completion.
|
|
7
|
+
* Object keys represent a column name and the values represent the column value.
|
|
8
|
+
*/
|
|
9
|
+
export interface StreamingData {
|
|
10
|
+
[columnName: string]: any;
|
|
11
|
+
/**
|
|
12
|
+
* The total estimated cost of the chat completion in USD. Optional.
|
|
13
|
+
*/
|
|
14
|
+
cost?: number;
|
|
15
|
+
/**
|
|
16
|
+
* The latency of the chat completion in milliseconds. Optional.
|
|
17
|
+
*/
|
|
18
|
+
latency?: number;
|
|
19
|
+
/**
|
|
20
|
+
* The output string generated by the chat completion.
|
|
21
|
+
*/
|
|
22
|
+
output: string;
|
|
23
|
+
/**
|
|
24
|
+
* A timestamp representing when the chat completion occurred. Optional.
|
|
25
|
+
*/
|
|
26
|
+
timestamp?: number;
|
|
27
|
+
/**
|
|
28
|
+
* The number of tokens used in the chat completion. Optional.
|
|
29
|
+
*/
|
|
30
|
+
tokens?: number;
|
|
31
|
+
}
|
|
32
|
+
/**
|
|
33
|
+
* Configuration settings for uploading chat completion data to Openlayer.
|
|
34
|
+
*/
|
|
35
|
+
interface StreamingDataConfig {
|
|
36
|
+
/**
|
|
37
|
+
* The name of the column that stores the request cost data. Can be null.
|
|
38
|
+
*/
|
|
39
|
+
costColumnName: string | null;
|
|
40
|
+
/**
|
|
41
|
+
* The name of the column that stores the ground truth data. Can be null.
|
|
42
|
+
*/
|
|
43
|
+
groundTruthColumnName: string | null;
|
|
44
|
+
/**
|
|
45
|
+
* The name of the column that stores inference IDs. Can be null.
|
|
46
|
+
*/
|
|
47
|
+
inferenceIdColumnName: string | null;
|
|
48
|
+
/**
|
|
49
|
+
* An array of names for input variable columns. Can be null.
|
|
50
|
+
*/
|
|
51
|
+
inputVariableNames?: string[] | null;
|
|
52
|
+
/**
|
|
53
|
+
* The name of the column that stores latency data. Can be null.
|
|
54
|
+
*/
|
|
55
|
+
latencyColumnName: string | null;
|
|
56
|
+
/**
|
|
57
|
+
* The name of the column that stores the number of tokens. Can be null.
|
|
58
|
+
*/
|
|
59
|
+
numOfTokenColumnName: string | null;
|
|
60
|
+
/**
|
|
61
|
+
* The name of the column that stores output data. Can be null.
|
|
62
|
+
*/
|
|
63
|
+
outputColumnName: string | null;
|
|
64
|
+
/**
|
|
65
|
+
* The full prompt history for the chat completion.
|
|
66
|
+
*/
|
|
67
|
+
prompt?: ChatCompletionMessageParam[];
|
|
68
|
+
/**
|
|
69
|
+
* The name of the column that stores timestamp data. Can be null.
|
|
70
|
+
*/
|
|
71
|
+
timestampColumnName: string | null;
|
|
72
|
+
}
|
|
73
|
+
type OpenlayerClientConstructorProps = {
|
|
74
|
+
openlayerApiKey?: string;
|
|
75
|
+
openlayerServerUrl?: string;
|
|
76
|
+
};
|
|
77
|
+
type OpenAIMonitorConstructorProps = OpenlayerClientConstructorProps & {
|
|
78
|
+
openAiApiKey: string;
|
|
79
|
+
openlayerInferencePipelineId?: string;
|
|
80
|
+
openlayerInferencePipelineName?: string;
|
|
81
|
+
openlayerProjectName?: string;
|
|
82
|
+
};
|
|
83
|
+
type OpenlayerInferencePipeline = {
|
|
84
|
+
dataVolumeGraphs?: OpenlayerSampleVolumeGraph;
|
|
85
|
+
dateCreated: string;
|
|
86
|
+
dateLastEvaluated?: string;
|
|
87
|
+
dateLastSampleReceived?: string;
|
|
88
|
+
dateOfNextEvaluation?: string;
|
|
89
|
+
dateUpdated: string;
|
|
90
|
+
description?: string;
|
|
91
|
+
failingGoalCount: number;
|
|
92
|
+
id: string;
|
|
93
|
+
name: string;
|
|
94
|
+
passingGoalCount: number;
|
|
95
|
+
projectId: string;
|
|
96
|
+
status: OpenlayerInferencePipelineStatus;
|
|
97
|
+
statusMessage?: string;
|
|
98
|
+
totalGoalCount: number;
|
|
99
|
+
};
|
|
100
|
+
type OpenlayerInferencePipelineStatus = 'completed' | 'failed' | 'paused' | 'queued' | 'running' | 'unknown';
|
|
101
|
+
type OpenlayerProject = {
|
|
102
|
+
dateCreated: string;
|
|
103
|
+
dateUpdated: string;
|
|
104
|
+
description?: string;
|
|
105
|
+
developmentGoalCount: number;
|
|
106
|
+
goalCount: number;
|
|
107
|
+
id: string;
|
|
108
|
+
inferencePipelineCount: number;
|
|
109
|
+
memberIds: string[];
|
|
110
|
+
monitoringGoalCount: number;
|
|
111
|
+
name: string;
|
|
112
|
+
sample?: boolean;
|
|
113
|
+
slackChannelId?: string;
|
|
114
|
+
slackChannelName?: string;
|
|
115
|
+
slackChannelNotificationsEnabled: boolean;
|
|
116
|
+
taskType: OpenlayerTaskType;
|
|
117
|
+
unreadNotificationCount: number;
|
|
118
|
+
versionCount: number;
|
|
119
|
+
};
|
|
120
|
+
type OpenlayerSampleVolumeGraphBucket = {
|
|
121
|
+
title: string;
|
|
122
|
+
xAxis: {
|
|
123
|
+
data: string[];
|
|
124
|
+
title: string;
|
|
125
|
+
};
|
|
126
|
+
yAxis: {
|
|
127
|
+
data: number[];
|
|
128
|
+
title: string;
|
|
129
|
+
};
|
|
130
|
+
};
|
|
131
|
+
type OpenlayerSampleVolumeGraph = {
|
|
132
|
+
daily: OpenlayerSampleVolumeGraphBucket;
|
|
133
|
+
hourly: OpenlayerSampleVolumeGraphBucket;
|
|
134
|
+
monthly: OpenlayerSampleVolumeGraphBucket;
|
|
135
|
+
weekly: OpenlayerSampleVolumeGraphBucket;
|
|
136
|
+
};
|
|
137
|
+
type OpenlayerTaskType = 'llm-base' | 'tabular-classification' | 'tabular-regression' | 'text-classification';
|
|
138
|
+
export declare class OpenlayerClient {
|
|
139
|
+
private openlayerApiKey?;
|
|
140
|
+
defaultConfig: StreamingDataConfig;
|
|
141
|
+
private openlayerServerUrl;
|
|
142
|
+
private version;
|
|
143
|
+
/**
|
|
144
|
+
* Constructs an OpenlayerClient instance.
|
|
145
|
+
* @param {OpenlayerClientConstructorProps} props - The config for the Openlayer client. The API key is required.
|
|
146
|
+
*/
|
|
147
|
+
constructor({ openlayerApiKey, openlayerServerUrl, }: OpenlayerClientConstructorProps);
|
|
148
|
+
private resolvedQuery;
|
|
149
|
+
/**
|
|
150
|
+
* Creates a new inference pipeline in Openlayer or loads an existing one.
|
|
151
|
+
* @param {string} projectId - The ID of the project containing the inference pipeline.
|
|
152
|
+
* @param {string} [name='production'] - The name of the inference pipeline, defaults to 'production'.
|
|
153
|
+
* @returns {Promise<OpenlayerInferencePipeline>} A promise that resolves to an OpenlayerInferencePipeline object.
|
|
154
|
+
* @throws {Error} Throws an error if the inference pipeline cannot be created or found.
|
|
155
|
+
*/
|
|
156
|
+
createInferencePipeline: (projectId: string, name?: string) => Promise<OpenlayerInferencePipeline>;
|
|
157
|
+
/**
|
|
158
|
+
* Creates a new project in Openlayer or loads an existing one.
|
|
159
|
+
* @param {string} name - The name of the project.
|
|
160
|
+
* @param {OpenlayerTaskType} taskType - The type of task associated with the project.
|
|
161
|
+
* @param {string} [description] - Optional description of the project.
|
|
162
|
+
* @returns {Promise<OpenlayerProject>} A promise that resolves to an OpenlayerProject object.
|
|
163
|
+
* @throws {Error} Throws an error if the project cannot be created or found.
|
|
164
|
+
*/
|
|
165
|
+
createProject: (name: string, taskType: OpenlayerTaskType, description?: string) => Promise<OpenlayerProject>;
|
|
166
|
+
/**
|
|
167
|
+
* Loads an existing inference pipeline from Openlayer based on its name and project ID.
|
|
168
|
+
* @param {string} projectId - The ID of the project containing the inference pipeline.
|
|
169
|
+
* @param {string} [name='production'] - The name of the inference pipeline, defaults to 'production'.
|
|
170
|
+
* @returns {Promise<OpenlayerInferencePipeline>} A promise that resolves to an OpenlayerInferencePipeline object.
|
|
171
|
+
* @throws {Error} Throws an error if the inference pipeline is not found.
|
|
172
|
+
*/
|
|
173
|
+
loadInferencePipeline: (projectId: string, name?: string) => Promise<OpenlayerInferencePipeline>;
|
|
174
|
+
/**
|
|
175
|
+
* Loads an existing project from Openlayer based on its name.
|
|
176
|
+
* @param {string} name - The name of the project.
|
|
177
|
+
* @returns {Promise<OpenlayerProject>} A promise that resolves to an OpenlayerProject object.
|
|
178
|
+
* @throws {Error} Throws an error if the project is not found.
|
|
179
|
+
*/
|
|
180
|
+
loadProject: (name: string) => Promise<OpenlayerProject>;
|
|
181
|
+
/**
|
|
182
|
+
* Streams data to the Openlayer inference pipeline.
|
|
183
|
+
* @param {StreamingData} data - The chat completion data to be streamed.
|
|
184
|
+
* @param {string} inferencePipelineId - The ID of the Openlayer inference pipeline to which data is streamed.
|
|
185
|
+
* @returns {Promise<void>} A promise that resolves when the data has been successfully streamed.
|
|
186
|
+
*/
|
|
187
|
+
streamData: (data: StreamingData, config: StreamingDataConfig, inferencePipelineId: string) => Promise<void>;
|
|
188
|
+
}
|
|
189
|
+
export declare class OpenAIMonitor {
|
|
190
|
+
private openlayerClient;
|
|
191
|
+
private openAIClient;
|
|
192
|
+
private openlayerProjectName?;
|
|
193
|
+
private openlayerInferencePipelineId?;
|
|
194
|
+
private openlayerInferencePipelineName;
|
|
195
|
+
/**
|
|
196
|
+
* Constructs an OpenAIMonitor instance.
|
|
197
|
+
* @param {OpenAIMonitorConstructorProps} props - The configuration properties for the OpenAI and Openlayer clients.
|
|
198
|
+
*/
|
|
199
|
+
constructor({ openAiApiKey, openlayerApiKey, openlayerProjectName, openlayerInferencePipelineId, openlayerInferencePipelineName, openlayerServerUrl, }: OpenAIMonitorConstructorProps);
|
|
200
|
+
private cost;
|
|
201
|
+
private chatCompletionPrompt;
|
|
202
|
+
private threadPrompt;
|
|
203
|
+
private inputVariables;
|
|
204
|
+
/**
|
|
205
|
+
* Creates a chat completion using the OpenAI client and streams the result to Openlayer.
|
|
206
|
+
* @param {ChatCompletionCreateParams} body - The parameters for creating a chat completion.
|
|
207
|
+
* @param {RequestOptions} [options] - Optional request options.
|
|
208
|
+
* @param {StreamingData} [additionalLogs] - Optional metadata logs to include with the request sent to Openlayer.
|
|
209
|
+
* @returns {Promise<ChatCompletion | Stream<ChatCompletionChunk>>} Promise of a ChatCompletion or a Stream
|
|
210
|
+
* @throws {Error} Throws errors from the OpenAI client.
|
|
211
|
+
*/
|
|
212
|
+
createChatCompletion: (body: ChatCompletionCreateParams, options?: RequestOptions, additionalLogs?: StreamingData) => Promise<ChatCompletion | Stream<ChatCompletionChunk>>;
|
|
213
|
+
/**
|
|
214
|
+
* Creates a completion using the OpenAI client and streams the result to Openlayer.
|
|
215
|
+
* @param {CompletionCreateParams} body - The parameters for creating a completion.
|
|
216
|
+
* @param {RequestOptions} [options] - Optional request options.
|
|
217
|
+
* @param {StreamingData} [additionalLogs] - Optional metadata logs to include with the request sent to Openlayer.
|
|
218
|
+
* @returns {Promise<Completion | Stream<Completion>>} Promise that resolves to a Completion or a Stream.
|
|
219
|
+
* @throws {Error} Throws errors from the OpenAI client.
|
|
220
|
+
*/
|
|
221
|
+
createCompletion: (body: CompletionCreateParams, options?: RequestOptions, additionalLogs?: StreamingData) => Promise<Completion | Stream<Completion>>;
|
|
222
|
+
/**
|
|
223
|
+
* Monitor a run from an OpenAI assistant.
|
|
224
|
+
* Once the run is completed, the thread data is published to Openlayer,
|
|
225
|
+
* along with the latency, cost, and number of tokens used.
|
|
226
|
+
* @param {Run} run - The run created by the OpenAI assistant.
|
|
227
|
+
* @param {StreamingData} [additionalLogs] - Optional metadata logs to include with the request sent to Openlayer.
|
|
228
|
+
* @returns {Promise<void>} A promise that resolves when the run data has been successfully published to Openlayer.
|
|
229
|
+
*/
|
|
230
|
+
monitorThreadRun(run: Run, additionalLogs?: StreamingData): Promise<void>;
|
|
231
|
+
/**
|
|
232
|
+
* Starts monitoring for the OpenAI Monitor instance. If monitoring is already active, a warning is logged.
|
|
233
|
+
*/
|
|
234
|
+
initialize(): Promise<void>;
|
|
235
|
+
}
|
|
236
|
+
export {};
|
|
@@ -0,0 +1,608 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
|
3
|
+
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
|
4
|
+
return new (P || (P = Promise))(function (resolve, reject) {
|
|
5
|
+
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
|
6
|
+
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
|
7
|
+
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
|
8
|
+
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
|
9
|
+
});
|
|
10
|
+
};
|
|
11
|
+
var __asyncValues = (this && this.__asyncValues) || function (o) {
|
|
12
|
+
if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
|
|
13
|
+
var m = o[Symbol.asyncIterator], i;
|
|
14
|
+
return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
|
|
15
|
+
function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
|
|
16
|
+
function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
|
|
17
|
+
};
|
|
18
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
19
|
+
exports.OpenAIMonitor = exports.OpenlayerClient = void 0;
|
|
20
|
+
const node_fetch_1 = require("node-fetch");
|
|
21
|
+
const openai_1 = require("openai");
|
|
22
|
+
const uuid_1 = require("uuid");
|
|
23
|
+
const request_1 = require("./utils/request");
|
|
24
|
+
const OpenAIPricing = {
|
|
25
|
+
'babbage-002': {
|
|
26
|
+
input: 0.0004,
|
|
27
|
+
output: 0.0004,
|
|
28
|
+
},
|
|
29
|
+
'davinci-002': {
|
|
30
|
+
input: 0.002,
|
|
31
|
+
output: 0.002,
|
|
32
|
+
},
|
|
33
|
+
'gpt-3.5-turbo': {
|
|
34
|
+
input: 0.0005,
|
|
35
|
+
output: 0.0015,
|
|
36
|
+
},
|
|
37
|
+
'gpt-3.5-turbo-0125': {
|
|
38
|
+
input: 0.0005,
|
|
39
|
+
output: 0.0015,
|
|
40
|
+
},
|
|
41
|
+
'gpt-3.5-turbo-0301': {
|
|
42
|
+
input: 0.0015,
|
|
43
|
+
output: 0.002,
|
|
44
|
+
},
|
|
45
|
+
'gpt-3.5-turbo-0613': {
|
|
46
|
+
input: 0.0015,
|
|
47
|
+
output: 0.002,
|
|
48
|
+
},
|
|
49
|
+
'gpt-3.5-turbo-1106': {
|
|
50
|
+
input: 0.001,
|
|
51
|
+
output: 0.002,
|
|
52
|
+
},
|
|
53
|
+
'gpt-3.5-turbo-16k-0613': {
|
|
54
|
+
input: 0.003,
|
|
55
|
+
output: 0.004,
|
|
56
|
+
},
|
|
57
|
+
'gpt-3.5-turbo-instruct': {
|
|
58
|
+
input: 0.0015,
|
|
59
|
+
output: 0.002,
|
|
60
|
+
},
|
|
61
|
+
'gpt-4': {
|
|
62
|
+
input: 0.03,
|
|
63
|
+
output: 0.06,
|
|
64
|
+
},
|
|
65
|
+
'gpt-4-0125-preview': {
|
|
66
|
+
input: 0.01,
|
|
67
|
+
output: 0.03,
|
|
68
|
+
},
|
|
69
|
+
'gpt-4-0314': {
|
|
70
|
+
input: 0.03,
|
|
71
|
+
output: 0.06,
|
|
72
|
+
},
|
|
73
|
+
'gpt-4-0613': {
|
|
74
|
+
input: 0.03,
|
|
75
|
+
output: 0.06,
|
|
76
|
+
},
|
|
77
|
+
'gpt-4-1106-preview': {
|
|
78
|
+
input: 0.01,
|
|
79
|
+
output: 0.03,
|
|
80
|
+
},
|
|
81
|
+
'gpt-4-1106-vision-preview': {
|
|
82
|
+
input: 0.01,
|
|
83
|
+
output: 0.03,
|
|
84
|
+
},
|
|
85
|
+
'gpt-4-32k': {
|
|
86
|
+
input: 0.06,
|
|
87
|
+
output: 0.12,
|
|
88
|
+
},
|
|
89
|
+
'gpt-4-32k-0314': {
|
|
90
|
+
input: 0.06,
|
|
91
|
+
output: 0.12,
|
|
92
|
+
},
|
|
93
|
+
'gpt-4-32k-0613': {
|
|
94
|
+
input: 0.03,
|
|
95
|
+
output: 0.06,
|
|
96
|
+
},
|
|
97
|
+
};
|
|
98
|
+
class OpenlayerClient {
|
|
99
|
+
/**
|
|
100
|
+
* Constructs an OpenlayerClient instance.
|
|
101
|
+
* @param {OpenlayerClientConstructorProps} props - The config for the Openlayer client. The API key is required.
|
|
102
|
+
*/
|
|
103
|
+
constructor({ openlayerApiKey, openlayerServerUrl, }) {
|
|
104
|
+
this.defaultConfig = {
|
|
105
|
+
costColumnName: 'cost',
|
|
106
|
+
groundTruthColumnName: null,
|
|
107
|
+
inferenceIdColumnName: 'id',
|
|
108
|
+
latencyColumnName: 'latency',
|
|
109
|
+
numOfTokenColumnName: 'tokens',
|
|
110
|
+
outputColumnName: 'output',
|
|
111
|
+
timestampColumnName: 'timestamp',
|
|
112
|
+
};
|
|
113
|
+
this.openlayerServerUrl = 'https://api.openlayer.com/v1';
|
|
114
|
+
this.version = '0.1.0a21';
|
|
115
|
+
this.resolvedQuery = (endpoint, args = {}) => (0, request_1.resolvedQuery)(this.openlayerServerUrl, endpoint, args);
|
|
116
|
+
/**
|
|
117
|
+
* Creates a new inference pipeline in Openlayer or loads an existing one.
|
|
118
|
+
* @param {string} projectId - The ID of the project containing the inference pipeline.
|
|
119
|
+
* @param {string} [name='production'] - The name of the inference pipeline, defaults to 'production'.
|
|
120
|
+
* @returns {Promise<OpenlayerInferencePipeline>} A promise that resolves to an OpenlayerInferencePipeline object.
|
|
121
|
+
* @throws {Error} Throws an error if the inference pipeline cannot be created or found.
|
|
122
|
+
*/
|
|
123
|
+
this.createInferencePipeline = (projectId, name = 'production') => __awaiter(this, void 0, void 0, function* () {
|
|
124
|
+
try {
|
|
125
|
+
return yield this.loadInferencePipeline(projectId, name);
|
|
126
|
+
}
|
|
127
|
+
catch (_a) { }
|
|
128
|
+
const createInferencePipelineEndpoint = `/projects/${projectId}/inference-pipelines`;
|
|
129
|
+
const createInferencePipelineQuery = this.resolvedQuery(createInferencePipelineEndpoint, { version: this.version });
|
|
130
|
+
const createInferencePipelineResponse = yield (0, node_fetch_1.default)(createInferencePipelineQuery, {
|
|
131
|
+
body: JSON.stringify({
|
|
132
|
+
description: '',
|
|
133
|
+
name,
|
|
134
|
+
}),
|
|
135
|
+
headers: {
|
|
136
|
+
Authorization: `Bearer ${this.openlayerApiKey}`,
|
|
137
|
+
'Content-Type': 'application/json',
|
|
138
|
+
},
|
|
139
|
+
method: 'POST',
|
|
140
|
+
});
|
|
141
|
+
const inferencePipeline = (yield createInferencePipelineResponse.json());
|
|
142
|
+
if (!(inferencePipeline === null || inferencePipeline === void 0 ? void 0 : inferencePipeline.id)) {
|
|
143
|
+
throw new Error('Error creating inference pipeline');
|
|
144
|
+
}
|
|
145
|
+
return inferencePipeline;
|
|
146
|
+
});
|
|
147
|
+
/**
|
|
148
|
+
* Creates a new project in Openlayer or loads an existing one.
|
|
149
|
+
* @param {string} name - The name of the project.
|
|
150
|
+
* @param {OpenlayerTaskType} taskType - The type of task associated with the project.
|
|
151
|
+
* @param {string} [description] - Optional description of the project.
|
|
152
|
+
* @returns {Promise<OpenlayerProject>} A promise that resolves to an OpenlayerProject object.
|
|
153
|
+
* @throws {Error} Throws an error if the project cannot be created or found.
|
|
154
|
+
*/
|
|
155
|
+
this.createProject = (name, taskType, description) => __awaiter(this, void 0, void 0, function* () {
|
|
156
|
+
try {
|
|
157
|
+
return yield this.loadProject(name);
|
|
158
|
+
}
|
|
159
|
+
catch (_b) { }
|
|
160
|
+
const projectsEndpoint = '/projects';
|
|
161
|
+
const projectsQuery = this.resolvedQuery(projectsEndpoint);
|
|
162
|
+
const response = yield (0, node_fetch_1.default)(projectsQuery, {
|
|
163
|
+
body: JSON.stringify({
|
|
164
|
+
description,
|
|
165
|
+
name,
|
|
166
|
+
taskType,
|
|
167
|
+
}),
|
|
168
|
+
headers: {
|
|
169
|
+
Authorization: `Bearer ${this.openlayerApiKey}`,
|
|
170
|
+
'Content-Type': 'application/json',
|
|
171
|
+
},
|
|
172
|
+
method: 'POST',
|
|
173
|
+
});
|
|
174
|
+
const data = (yield response.json());
|
|
175
|
+
const { items: projects, error } = data;
|
|
176
|
+
if (!Array.isArray(projects)) {
|
|
177
|
+
throw new Error(typeof error === 'string' ? error : 'Invalid response from Openlayer');
|
|
178
|
+
}
|
|
179
|
+
const project = projects.find((p) => p.name === name);
|
|
180
|
+
if (!(project === null || project === void 0 ? void 0 : project.id)) {
|
|
181
|
+
throw new Error('Project not found');
|
|
182
|
+
}
|
|
183
|
+
return project;
|
|
184
|
+
});
|
|
185
|
+
/**
 * Loads an existing inference pipeline from Openlayer based on its name and project ID.
 * @param {string} projectId - The ID of the project containing the inference pipeline.
 * @param {string} [name='production'] - The name of the inference pipeline, defaults to 'production'.
 * @returns {Promise<OpenlayerInferencePipeline>} A promise that resolves to an OpenlayerInferencePipeline object.
 * @throws {Error} Throws an error if the inference pipeline is not found.
 */
this.loadInferencePipeline = (projectId, name = 'production') => __awaiter(this, void 0, void 0, function* () {
    // List the project's inference pipelines, filtered by name.
    const endpoint = `/projects/${projectId}/inference-pipelines`;
    const listQuery = this.resolvedQuery(endpoint, { name, version: this.version });
    const listResponse = yield (0, node_fetch_1.default)(listQuery, {
        headers: {
            Authorization: `Bearer ${this.openlayerApiKey}`,
            'Content-Type': 'application/json',
        },
        method: 'GET',
    });
    const { items, error } = (yield listResponse.json());
    // Tolerate a non-array payload; pick the pipeline whose name matches.
    let match;
    if (Array.isArray(items)) {
        match = items.find((pipeline) => pipeline.name === name);
    }
    if (!match || !match.id) {
        // Prefer the server-provided error message when one is present.
        throw new Error(typeof error === 'string' ? error : 'Inference pipeline not found');
    }
    return match;
});
|
|
215
|
+
/**
 * Loads an existing project from Openlayer based on its name.
 * @param {string} name - The name of the project.
 * @returns {Promise<OpenlayerProject>} A promise that resolves to an OpenlayerProject object.
 * @throws {Error} Throws an error if the project is not found.
 */
this.loadProject = (name) => __awaiter(this, void 0, void 0, function* () {
    // List projects filtered by name, then pick the exact match.
    const endpoint = '/projects';
    const listQuery = this.resolvedQuery(endpoint, { name, version: this.version });
    const listResponse = yield (0, node_fetch_1.default)(listQuery, {
        headers: {
            Authorization: `Bearer ${this.openlayerApiKey}`,
            'Content-Type': 'application/json',
        },
        method: 'GET',
    });
    const payload = yield listResponse.json();
    const { items, error } = payload;
    if (!Array.isArray(items)) {
        // Prefer the server-provided error message when one is present.
        throw new Error(typeof error === 'string' ? error : 'Invalid response from Openlayer');
    }
    const match = items.find((candidate) => candidate.name === name);
    if (!(match === null || match === void 0 ? void 0 : match.id)) {
        throw new Error('Project not found');
    }
    return match;
});
|
|
246
|
+
/**
 * Streams a single row of inference data to an Openlayer inference pipeline.
 * Logging is best-effort: failures are reported to the console and never thrown.
 * @param {StreamingData} data - The chat completion data to be streamed.
 * @param {OpenlayerConfig} config - The model/prompt configuration attached to the row.
 * @param {string} inferencePipelineId - The ID of the Openlayer inference pipeline to which data is streamed.
 * @returns {Promise<void>} A promise that resolves when the data has been successfully streamed.
 */
this.streamData = (data, config, inferencePipelineId) => __awaiter(this, void 0, void 0, function* () {
    var _c;
    if (!this.openlayerApiKey) {
        // Fixed grammar of the error message ("are required" -> "is required").
        console.error('Openlayer API key is required for streaming data.');
        return;
    }
    try {
        const dataStreamEndpoint = `/inference-pipelines/${inferencePipelineId}/data-stream`;
        const dataStreamQuery = this.resolvedQuery(dataStreamEndpoint);
        const response = yield (0, node_fetch_1.default)(dataStreamQuery, {
            body: JSON.stringify({
                config,
                rows: [
                    // Attach a fresh UUID and normalize the timestamp from
                    // milliseconds to whole seconds (defaults to "now").
                    Object.assign(Object.assign({}, data), { id: (0, uuid_1.v4)(), timestamp: Math.round(((_c = data.timestamp) !== null && _c !== void 0 ? _c : Date.now()) / 1000) }),
                ],
            }),
            headers: {
                Authorization: `Bearer ${this.openlayerApiKey}`,
                'Content-Type': 'application/json',
            },
            method: 'POST',
        });
        if (!response.ok) {
            // Previously the status code was logged twice; log it once with context.
            console.error(`Error making POST request: ${response.status}`);
        }
        // Consume the body so node-fetch can release the underlying connection.
        yield response.json();
    }
    catch (error) {
        console.error('Error streaming data to Openlayer:', error);
    }
});
|
|
284
|
+
// Persist the credentials/config and warn early when the API key is
// missing, since every publish call below depends on it.
this.openlayerApiKey = openlayerApiKey;
if (openlayerServerUrl) {
    this.openlayerServerUrl = openlayerServerUrl;
}
if (!this.openlayerApiKey) {
    // Fixed grammar of the error message ("are required" -> "is required").
    console.error('Openlayer API key is required for publishing.');
}
|
|
291
|
+
}
|
|
292
|
+
}
|
|
293
|
+
exports.OpenlayerClient = OpenlayerClient;
|
|
294
|
+
class OpenAIMonitor {
    /**
     * Constructs an OpenAIMonitor instance.
     * @param {OpenAIMonitorConstructorProps} props - The configuration properties for the OpenAI and Openlayer clients.
     */
    constructor({ openAiApiKey, openlayerApiKey, openlayerProjectName, openlayerInferencePipelineId, openlayerInferencePipelineName, openlayerServerUrl, }) {
        // Default pipeline name; overridden below when the caller provides one.
        this.openlayerInferencePipelineName = 'production';
        // Estimates the request cost from token counts using the rates in
        // OpenAIPricing (defined elsewhere in this module); token counts are
        // divided by 1000, so the rates are per 1,000 tokens by construction.
        // Returns undefined for models with no pricing entry.
        this.cost = (model, inputTokens, outputTokens) => {
            const pricing = OpenAIPricing[model];
            const inputCost = typeof pricing === 'undefined'
                ? undefined
                : (inputTokens / 1000) * pricing.input;
            const outputCost = typeof pricing === 'undefined'
                ? undefined
                : (outputTokens / 1000) * pricing.output;
            return typeof pricing === 'undefined'
                ? undefined
                : (inputCost !== null && inputCost !== void 0 ? inputCost : 0) + (outputCost !== null && outputCost !== void 0 ? outputCost : 0);
        };
        // Turns a message list into a prompt template: each user message is
        // replaced by a `{{ message_<index> }}` placeholder (index is the
        // message's position in the full list); other roles keep their content.
        this.chatCompletionPrompt = (fromMessages) => fromMessages.map(({ content, role }, i) => ({
            content: role === 'user' ? `{{ message_${i} }}` : content,
            role,
        }));
        // Collects every message from a paginated thread listing and flattens
        // each message's content parts into { content, role } records.
        // The try/catch/finally shape below is tsc's downlevel emit of a
        // `for await (const page of fromMessages.iterPages())` loop.
        this.threadPrompt = (fromMessages) => __awaiter(this, void 0, void 0, function* () {
            var _a, e_1, _b, _c;
            const messages = [];
            try {
                for (var _d = true, _e = __asyncValues(fromMessages.iterPages()), _f; _f = yield _e.next(), _a = _f.done, !_a; _d = true) {
                    _c = _f.value;
                    _d = false;
                    const page = _c;
                    messages.push(...page.getPaginatedItems());
                }
            }
            catch (e_1_1) { e_1 = { error: e_1_1 }; }
            finally {
                try {
                    if (!_d && !_a && (_b = _e.return)) yield _b.call(_e);
                }
                finally { if (e_1) throw e_1.error; }
            }
            return messages
                .map(({ content, role }) => content.map((item) => ({
                // Image parts contribute their file ID; text parts (and any
                // unknown part type) contribute their text value.
                content: (() => {
                    switch (item.type) {
                        case 'image_file':
                            return item.image_file.file_id;
                        case 'text':
                        default:
                            return item.text.value;
                    }
                })(),
                role,
            })))
                .flat();
        });
        // Derives the template variable names from a prompt (stripping the
        // `{{ }}` delimiters) and pairs them positionally with the user
        // messages' actual content.
        this.inputVariables = (fromPrompt, andMessages) => {
            const inputVariableNames = fromPrompt
                .filter(({ role }) => role === 'user')
                .map(({ content }) => String(content).replace(/{{\s*|\s*}}/g, ''));
            const inputVariables = andMessages
                .filter(({ role }) => role === 'user')
                .map(({ content }) => content);
            const inputVariablesMap = inputVariableNames.reduce((acc, name, i) => (Object.assign(Object.assign({}, acc), { [name]: inputVariables[i] })), {});
            return { inputVariableNames, inputVariables, inputVariablesMap };
        };
        /**
         * Creates a chat completion using the OpenAI client and streams the result to Openlayer.
         * @param {ChatCompletionCreateParams} body - The parameters for creating a chat completion.
         * @param {RequestOptions} [options] - Optional request options.
         * @param {StreamingData} [additionalLogs] - Optional metadata logs to include with the request sent to Openlayer.
         * @returns {Promise<ChatCompletion | Stream<ChatCompletionChunk>>} Promise of a ChatCompletion or a Stream
         * @throws {Error} Throws errors from the OpenAI client.
         */
        this.createChatCompletion = (body, options, additionalLogs) => __awaiter(this, void 0, void 0, function* () {
            var _g, e_2, _h, _j;
            var _k, _l, _m, _o, _p, _q, _r;
            if (typeof this.openlayerInferencePipelineId === 'undefined') {
                // Warn but continue: the OpenAI call still happens below; only
                // the Openlayer logging is skipped.
                console.error('No inference pipeline found.');
            }
            // Start a timer to measure latency
            const startTime = Date.now();
            // Accumulate output for streamed responses
            let streamedOutput = '';
            const response = yield this.openAIClient.chat.completions.create(body, options);
            try {
                if (typeof this.openlayerInferencePipelineId !== 'undefined') {
                    const prompt = this.chatCompletionPrompt(body.messages);
                    const { inputVariableNames, inputVariablesMap } = this.inputVariables(prompt, body.messages);
                    const config = Object.assign(Object.assign({}, this.openlayerClient.defaultConfig), { inputVariableNames,
                        prompt });
                    if (body.stream) {
                        const streamedResponse = response;
                        // Downlevel emit of `for await (const chunk of streamedResponse)`.
                        // NOTE(review): iterating here consumes the stream, so the
                        // caller receives an already-consumed iterator — confirm
                        // callers do not iterate the returned stream themselves.
                        try {
                            for (var _s = true, streamedResponse_1 = __asyncValues(streamedResponse), streamedResponse_1_1; streamedResponse_1_1 = yield streamedResponse_1.next(), _g = streamedResponse_1_1.done, !_g; _s = true) {
                                _j = streamedResponse_1_1.value;
                                _s = false;
                                const chunk = _j;
                                // Process each chunk - accumulate the delta text (may be null).
                                const chunkOutput = (_k = chunk.choices[0].delta.content) !== null && _k !== void 0 ? _k : '';
                                streamedOutput += chunkOutput;
                            }
                        }
                        catch (e_2_1) { e_2 = { error: e_2_1 }; }
                        finally {
                            try {
                                if (!_s && !_g && (_h = streamedResponse_1.return)) yield _h.call(streamedResponse_1);
                            }
                            finally { if (e_2) throw e_2.error; }
                        }
                        const endTime = Date.now();
                        const latency = endTime - startTime;
                        // Fire-and-forget: streamData's promise is intentionally not awaited.
                        this.openlayerClient.streamData(Object.assign(Object.assign({ latency, output: streamedOutput, timestamp: startTime }, inputVariablesMap), additionalLogs), config, this.openlayerInferencePipelineId);
                    }
                    else {
                        const nonStreamedResponse = response;
                        // Handle regular (non-streamed) response
                        const endTime = Date.now();
                        const latency = endTime - startTime;
                        const output = nonStreamedResponse.choices[0].message.content;
                        const tokens = (_m = (_l = nonStreamedResponse.usage) === null || _l === void 0 ? void 0 : _l.total_tokens) !== null && _m !== void 0 ? _m : 0;
                        const inputTokens = (_p = (_o = nonStreamedResponse.usage) === null || _o === void 0 ? void 0 : _o.prompt_tokens) !== null && _p !== void 0 ? _p : 0;
                        const outputTokens = (_r = (_q = nonStreamedResponse.usage) === null || _q === void 0 ? void 0 : _q.completion_tokens) !== null && _r !== void 0 ? _r : 0;
                        const cost = this.cost(nonStreamedResponse.model, inputTokens, outputTokens);
                        if (typeof output === 'string') {
                            this.openlayerClient.streamData(Object.assign(Object.assign({ cost,
                                latency, model: nonStreamedResponse.model, output, timestamp: startTime, tokens }, inputVariablesMap), additionalLogs), config, this.openlayerInferencePipelineId);
                        }
                        else {
                            console.error('No output received from OpenAI.');
                        }
                    }
                }
            }
            catch (error) {
                // Monitoring must never break the user's OpenAI call: swallow,
                // report, and still return the response below.
                console.error(error);
            }
            return response;
        });
        /**
         * Creates a completion using the OpenAI client and streams the result to Openlayer.
         * @param {CompletionCreateParams} body - The parameters for creating a completion.
         * @param {RequestOptions} [options] - Optional request options.
         * @param {StreamingData} [additionalLogs] - Optional metadata logs to include with the request sent to Openlayer.
         * @returns {Promise<Completion | Stream<Completion>>} Promise that resolves to a Completion or a Stream.
         * @throws {Error} Throws errors from the OpenAI client.
         */
        this.createCompletion = (body, options, additionalLogs) => __awaiter(this, void 0, void 0, function* () {
            var _t, e_3, _u, _v;
            var _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7;
            if (!body.prompt) {
                // Warn-only: the request is still forwarded to OpenAI below.
                console.error('No prompt provided.');
            }
            if (typeof this.openlayerInferencePipelineId === 'undefined') {
                console.error('No inference pipeline found.');
            }
            // Start a timer to measure latency
            const startTime = Date.now();
            // Accumulate output and tokens data for streamed responses
            let streamedModel = body.model;
            let streamedOutput = '';
            let streamedTokens = 0;
            let streamedInputTokens = 0;
            let streamedOutputTokens = 0;
            const response = yield this.openAIClient.completions.create(body, options);
            try {
                if (typeof this.openlayerInferencePipelineId !== 'undefined') {
                    const config = Object.assign(Object.assign({}, this.openlayerClient.defaultConfig), { inputVariableNames: ['input'] });
                    if (body.stream) {
                        const streamedResponse = response;
                        // Downlevel emit of `for await (const chunk of streamedResponse)`.
                        try {
                            for (var _8 = true, streamedResponse_2 = __asyncValues(streamedResponse), streamedResponse_2_1; streamedResponse_2_1 = yield streamedResponse_2.next(), _t = streamedResponse_2_1.done, !_t; _8 = true) {
                                _v = streamedResponse_2_1.value;
                                _8 = false;
                                const chunk = _v;
                                // Accumulate text and token usage across chunks; the model
                                // name is taken from the most recent chunk.
                                streamedModel = chunk.model;
                                streamedOutput += chunk.choices[0].text.trim();
                                streamedTokens += (_x = (_w = chunk.usage) === null || _w === void 0 ? void 0 : _w.total_tokens) !== null && _x !== void 0 ? _x : 0;
                                streamedInputTokens += (_z = (_y = chunk.usage) === null || _y === void 0 ? void 0 : _y.prompt_tokens) !== null && _z !== void 0 ? _z : 0;
                                streamedOutputTokens += (_1 = (_0 = chunk.usage) === null || _0 === void 0 ? void 0 : _0.completion_tokens) !== null && _1 !== void 0 ? _1 : 0;
                            }
                        }
                        catch (e_3_1) { e_3 = { error: e_3_1 }; }
                        finally {
                            try {
                                if (!_8 && !_t && (_u = streamedResponse_2.return)) yield _u.call(streamedResponse_2);
                            }
                            finally { if (e_3) throw e_3.error; }
                        }
                        const endTime = Date.now();
                        const latency = endTime - startTime;
                        const cost = this.cost(streamedModel, streamedInputTokens, streamedOutputTokens);
                        // Fire-and-forget logging to Openlayer.
                        this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: streamedOutput, timestamp: startTime, tokens: streamedTokens }, additionalLogs), config, this.openlayerInferencePipelineId);
                    }
                    else {
                        const nonStreamedResponse = response;
                        // Handle regular (non-streamed) response
                        const endTime = Date.now();
                        const latency = endTime - startTime;
                        const tokens = (_3 = (_2 = nonStreamedResponse.usage) === null || _2 === void 0 ? void 0 : _2.total_tokens) !== null && _3 !== void 0 ? _3 : 0;
                        const inputTokens = (_5 = (_4 = nonStreamedResponse.usage) === null || _4 === void 0 ? void 0 : _4.prompt_tokens) !== null && _5 !== void 0 ? _5 : 0;
                        const outputTokens = (_7 = (_6 = nonStreamedResponse.usage) === null || _6 === void 0 ? void 0 : _6.completion_tokens) !== null && _7 !== void 0 ? _7 : 0;
                        const cost = this.cost(nonStreamedResponse.model, inputTokens, outputTokens);
                        this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: nonStreamedResponse.choices[0].text, timestamp: startTime, tokens }, additionalLogs), config, this.openlayerInferencePipelineId);
                    }
                }
            }
            catch (error) {
                // Monitoring must never break the user's OpenAI call.
                console.error(error);
            }
            return response;
        });
        this.openlayerProjectName = openlayerProjectName;
        this.openlayerInferencePipelineId = openlayerInferencePipelineId;
        if (openlayerInferencePipelineName) {
            this.openlayerInferencePipelineName = openlayerInferencePipelineName;
        }
        this.openlayerClient = new OpenlayerClient({
            openlayerApiKey,
            openlayerServerUrl,
        });
        // dangerouslyAllowBrowser lets the monitor run in browser contexts;
        // the OpenAI key is then exposed to the page (accepted trade-off here).
        this.openAIClient = new openai_1.default({
            apiKey: openAiApiKey,
            dangerouslyAllowBrowser: true,
        });
    }
    /**
     * Monitor a run from an OpenAI assistant.
     * Once the run is completed, the thread data is published to Openlayer,
     * along with the latency, cost, and number of tokens used.
     * @param {Run} run - The run created by the OpenAI assistant.
     * @param {StreamingData} [additionalLogs] - Optional metadata logs to include with the request sent to Openlayer.
     * @returns {Promise<void>} A promise that resolves when the run data has been successfully published to Openlayer.
     */
    monitorThreadRun(run, additionalLogs) {
        var _a;
        return __awaiter(this, void 0, void 0, function* () {
            // Only completed runs are published; anything else is a no-op.
            if (run.status !== 'completed') {
                return;
            }
            if (typeof this.openlayerInferencePipelineId === 'undefined') {
                console.error('No inference pipeline found.');
                return;
            }
            try {
                const { assistant_id, completed_at, created_at, model, thread_id,
                // @ts-ignore
                usage, } = run;
                // Guard against a missing/non-object usage payload.
                // @ts-ignore
                const { completion_tokens, prompt_tokens, total_tokens } = typeof usage === 'undefined' ||
                    typeof usage !== 'object' ||
                    usage === null
                    ? {}
                    : usage;
                const cost = this.cost(model, prompt_tokens, completion_tokens);
                // Timestamps appear to be epoch seconds (multiplied by 1000 to
                // get milliseconds) — TODO confirm against the OpenAI Run schema.
                const latency = completed_at === null ||
                    created_at === null ||
                    isNaN(completed_at) ||
                    isNaN(created_at)
                    ? undefined
                    : (completed_at - created_at) * 1000;
                // Fetch the whole thread in chronological order and rebuild the
                // prompt; the final message is treated as the model's output.
                const messages = yield this.openAIClient.beta.threads.messages.list(thread_id, { order: 'asc' });
                const populatedPrompt = yield this.threadPrompt(messages);
                const prompt = this.chatCompletionPrompt(populatedPrompt);
                const { inputVariableNames, inputVariablesMap } = this.inputVariables(prompt, populatedPrompt);
                const config = Object.assign(Object.assign({}, this.openlayerClient.defaultConfig), { inputVariableNames, prompt: prompt.slice(0, prompt.length - 1) });
                const output = (_a = prompt[prompt.length - 1]) === null || _a === void 0 ? void 0 : _a.content;
                // Coerce the output to a string ('' when absent).
                const resolvedOutput = typeof output === 'string'
                    ? output
                    : typeof output === 'undefined' || output === null
                        ? ''
                        : `${output}`;
                this.openlayerClient.streamData(Object.assign(Object.assign({ cost,
                    latency, openai_assistant_id: assistant_id, openai_thread_id: thread_id, output: resolvedOutput, timestamp: run.created_at, tokens: total_tokens }, inputVariablesMap), additionalLogs), config, this.openlayerInferencePipelineId);
            }
            catch (error) {
                console.error('Error logging thread run:', error);
            }
        });
    }
    /**
     * Starts monitoring for the OpenAI Monitor instance. If monitoring is already active, a warning is logged.
     * Resolves the inference pipeline ID, creating the Openlayer project and
     * pipeline when they do not exist yet. Never throws: failures are logged.
     */
    initialize() {
        return __awaiter(this, void 0, void 0, function* () {
            console.info('Initializing monitor: creating or loading an Openlayer project and inference pipeline...');
            // An explicitly provided pipeline ID short-circuits all setup.
            if (typeof this.openlayerInferencePipelineId !== 'undefined') {
                console.info('Monitor initialized: using inference pipeline ID provided.');
                return;
            }
            try {
                if (typeof this.openlayerProjectName === 'undefined') {
                    console.error('No project name provided.');
                    return;
                }
                // Create (or load) the project, then its inference pipeline.
                const project = yield this.openlayerClient.createProject(this.openlayerProjectName, 'llm-base');
                if (typeof project !== 'undefined') {
                    const inferencePipeline = yield this.openlayerClient.createInferencePipeline(project.id, this.openlayerInferencePipelineName);
                    if (typeof (inferencePipeline === null || inferencePipeline === void 0 ? void 0 : inferencePipeline.id) === 'undefined') {
                        console.error('Unable to locate inference pipeline.');
                    }
                    else {
                        this.openlayerInferencePipelineId = inferencePipeline.id;
                    }
                }
                console.info('Monitor started');
            }
            catch (error) {
                console.error('An error occurred while starting the monitor:', error);
            }
        });
    }
}
|
|
608
|
+
exports.OpenAIMonitor = OpenAIMonitor;
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.queryParameters = exports.resolvedQuery = void 0;
|
|
4
|
+
// Builds a fully-resolved request URL: base URL + endpoint path + the
// serialized query string produced by queryParameters (empty when no args).
const resolvedQuery = (baseUrl, endpoint, args = {}) => {
    const queryString = (0, exports.queryParameters)(args);
    return `${baseUrl}${endpoint}${queryString}`;
};
|
|
5
|
+
exports.resolvedQuery = resolvedQuery;
|
|
6
|
+
// Serializes an args object into a URL query string ('' when nothing to send).
// Skips undefined/null values, empty strings, empty arrays, and empty objects.
// Arrays become repeated pairs: { tags: ['a','b'] } -> '?tags=a&tags=b'.
// Fixes over the previous version: keys and values are percent-encoded
// (spaces etc. no longer produce malformed URLs), and null values are
// skipped instead of throwing on Object.values(null).
const queryParameters = (args) => {
    const filteredArgs = Object.keys(args)
        .filter((key) => typeof args[key] !== 'undefined' && args[key] !== null)
        .reduce((acc, arg) => {
        const value = args[arg];
        if (Array.isArray(value)) {
            if (value.length === 0) {
                return acc;
            }
            // Encode each element, joined so the key repeats per entry.
            acc[arg] = value
                .map((item) => encodeURIComponent(item))
                .join(`&${encodeURIComponent(arg)}=`);
        }
        else {
            if ((typeof value === 'string' && value.length === 0) ||
                (typeof value === 'object' && Object.values(value).length === 0)) {
                return acc;
            }
            acc[arg] = encodeURIComponent(value);
        }
        return acc;
    }, {});
    if (Object.keys(filteredArgs).length === 0) {
        return '';
    }
    const resolvedArgs = Object.keys(filteredArgs)
        .map((key) => `${encodeURIComponent(key)}=${filteredArgs[key]}`)
        .join('&');
    return `?${resolvedArgs}`;
};
|
|
34
|
+
exports.queryParameters = queryParameters;
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* This shows how to use the OpenAI monitor with Openlayer to create a CLI handler
|
|
3
|
+
* for processing datasets. The script can be called with the following command:
|
|
4
|
+
* node dist/run.js --dataset-path {{ path }} --dataset-name {{ name }}
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
import { RunReturn } from 'openlayer/lib/utils/run';
|
|
8
|
+
import { OpenAIMonitor } from 'openlayer';
|
|
9
|
+
import { CLIHandler } from 'openlayer/cli/cliHandler';
|
|
10
|
+
import { ChatCompletion } from 'openai/resources';
|
|
11
|
+
|
|
12
|
+
export class MyModel {
|
|
13
|
+
private monitor: OpenAIMonitor;
|
|
14
|
+
|
|
15
|
+
private openaiApiKey: string;
|
|
16
|
+
|
|
17
|
+
private openlayerApiKey: string;
|
|
18
|
+
|
|
19
|
+
constructor() {
|
|
20
|
+
this.openaiApiKey = process.env.OPENAI_API_KEY || '';
|
|
21
|
+
this.openlayerApiKey = process.env.OPENLAYER_API_KEY || '';
|
|
22
|
+
const openlayerProjectName = process.env.OPENLAYER_PROJECT_NAME || '';
|
|
23
|
+
|
|
24
|
+
this.monitor = new OpenAIMonitor({
|
|
25
|
+
openAiApiKey: this.openaiApiKey,
|
|
26
|
+
openlayerApiKey: this.openlayerApiKey,
|
|
27
|
+
openlayerProjectName,
|
|
28
|
+
});
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
async run({ userQuery }: { userQuery: string }): Promise<RunReturn> {
|
|
32
|
+
// Implement the model run logic here
|
|
33
|
+
const model = 'gpt-3.5-turbo';
|
|
34
|
+
const response = await this.monitor.createChatCompletion(
|
|
35
|
+
{
|
|
36
|
+
messages: [
|
|
37
|
+
{
|
|
38
|
+
content: userQuery,
|
|
39
|
+
role: 'user',
|
|
40
|
+
},
|
|
41
|
+
],
|
|
42
|
+
model,
|
|
43
|
+
},
|
|
44
|
+
undefined
|
|
45
|
+
);
|
|
46
|
+
const result = (response as ChatCompletion).choices[0].message.content;
|
|
47
|
+
return { otherFields: { model }, output: result };
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
// User implements their model
|
|
52
|
+
const model = new MyModel();
|
|
53
|
+
|
|
54
|
+
// Initialize CLI handler with the user's model run method
|
|
55
|
+
const cliHandler = new CLIHandler(model.run.bind(model));
|
|
56
|
+
|
|
57
|
+
// Setup CLI and process dataset
|
|
58
|
+
cliHandler.runFromCLI();
|
package/package.json
CHANGED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
<svg width="2074" height="338" viewBox="0 0 2074 338" fill="none" xmlns="http://www.w3.org/2000/svg">
|
|
2
|
+
<path d="M78.2891 0V77.6246H248.238C255.317 77.6246 261.07 83.3779 261.07 90.4566V260.405H338.694V114.946L223.719 0H78.2891Z" fill="#592FEA"/>
|
|
3
|
+
<path d="M78.2892 247.543V77.6248H0.664551V156.243C0.664551 256.61 82.0243 338 182.421 338H261.04V260.375H91.0911C84.0124 260.375 78.259 254.622 78.259 247.543H78.2892Z" fill="#592FEA"/>
|
|
4
|
+
<path d="M1746.69 76.2997H1712.86L1678.49 173.263C1674.52 184.498 1669.16 200.433 1662.35 221.127L1651.29 187.812L1646.57 173.263L1612.38 76.2997H1577.83L1647.77 253.96V295.679H1589.63V328.391H1603.88C1645.66 328.391 1679.61 294.956 1680.48 253.387L1746.72 76.2695L1746.69 76.2997Z" fill="#592FEA"/>
|
|
5
|
+
<path d="M512.8 245.616C494.938 235.525 481.323 220.735 472.015 201.216C462.707 181.696 458.038 158.051 458.038 130.278C458.038 104.614 462.677 81.9622 471.925 62.3226C481.172 42.683 494.817 27.4111 512.8 16.4467C530.783 5.48222 552.381 0 577.593 0C602.805 0 623.83 5.51234 641.753 16.537C659.676 27.5617 673.351 42.8939 682.719 62.5937C692.087 82.2935 696.786 104.855 696.786 130.278C696.786 155.701 692.117 178.775 682.809 198.414C673.502 218.054 659.856 233.356 641.934 244.29C624.011 255.255 602.564 260.737 577.593 260.737C552.622 260.737 530.663 255.706 512.8 245.616V245.616ZM615.396 225.675C625.698 221.036 634.102 213.987 640.548 204.499C646.994 195.342 651.844 184.679 655.127 172.509C658.41 160.31 660.037 146.243 660.037 130.308C660.037 114.374 658.41 100.397 655.127 88.3782C651.844 76.3595 646.964 65.7264 640.548 56.4789C627.535 37.6225 607.022 28.1642 579.009 28.1642C563.767 28.1642 550.603 30.7245 539.549 35.8152C528.464 40.9058 519.879 47.3218 513.794 55.0632C507.71 63.1359 503.011 73.6786 499.727 86.6311C496.444 99.5836 494.817 114.133 494.817 130.308C494.817 146.484 496.504 161.786 499.908 174.798C503.312 187.811 507.589 197.721 512.74 204.499C519.879 213.535 528.976 220.464 540.001 225.343C551.025 230.223 563.556 232.633 577.623 232.633C592.503 232.633 605.094 230.313 615.426 225.675H615.396Z" fill="#592FEA"/>
|
|
6
|
+
<path d="M865.801 83.3481C878.332 90.969 888.152 101.903 895.261 116.151C902.339 130.399 905.894 147.117 905.894 166.334C905.894 185.552 902.46 201.607 895.622 215.915C888.754 230.223 879.145 241.248 866.795 249.05C854.445 256.851 840.228 260.737 824.172 260.737C798.629 260.737 779.291 251.429 766.157 232.784V328.421H733.806V76.2995H766.157V102.656C772.724 92.3546 780.797 84.6433 790.406 79.5226C800.014 74.432 810.919 71.8716 823.118 71.8716C839.053 71.8716 853.3 75.667 865.831 83.2879L865.801 83.3481ZM857.969 217.572C866.132 206.156 870.199 189.077 870.199 166.334C870.199 151.334 868.633 138.863 865.53 128.893C862.427 118.922 857.006 111.211 849.264 105.759C841.523 100.307 830.86 97.5958 817.274 97.5958C800.045 97.5958 787.032 103.349 778.236 114.826C769.441 126.302 765.043 143.381 765.043 165.973C765.043 186.366 768.657 202.933 775.857 215.644C783.056 228.356 796.641 234.712 816.551 234.712C836.462 234.712 849.806 228.988 857.939 217.572H857.969Z" fill="#592FEA"/>
|
|
7
|
+
<path d="M1339.47 4.2168H1371.82V256.339H1339.47V4.2168Z" fill="#592FEA"/>
|
|
8
|
+
<path d="M1074.64 203.625C1071.87 212.692 1067.32 219.741 1060.9 224.771C1052.47 231.398 1040.57 234.711 1025.21 234.711C1008.34 234.711 995.688 229.922 987.314 220.313C978.94 210.704 974.512 195.944 974.03 176.004H1110.63C1110.75 174.377 1110.81 171.847 1110.81 168.443C1110.81 156.966 1109.46 145.881 1106.78 135.218C1104.07 124.555 1100.33 115.338 1095.51 107.596C1088.25 96.1198 1078.34 87.294 1065.81 81.1491C1053.28 75.0042 1039.73 71.9016 1025.21 71.9016C1012.19 71.9016 1000.3 74.1909 989.513 78.7694C978.729 83.348 969.421 90.0652 961.559 98.9814C954.059 107.295 948.396 117.175 944.6 128.591C940.775 140.008 938.877 152.478 938.877 165.943C938.877 195.131 946.197 217.933 960.836 234.32C975.958 251.911 997.525 260.707 1025.54 260.707C1048.52 260.707 1067.02 255.315 1081.02 244.531C1093.49 234.922 1101.96 221.277 1106.48 203.595H1074.64V203.625ZM976.771 133.531C978.126 127.959 980.265 122.718 983.187 117.808C986.802 111.723 992.314 106.934 999.724 103.47C1007.1 100.006 1015.72 98.2886 1025.57 98.2886C1035.42 98.2886 1043.76 99.8549 1050.63 102.957C1057.5 106.06 1063.19 111.03 1067.77 117.808C1070.57 121.904 1072.86 126.965 1074.64 133.019C1076.41 139.044 1077.35 144.646 1077.44 149.797H974.602C974.723 144.526 975.446 139.104 976.801 133.531H976.771Z" fill="#592FEA"/>
|
|
9
|
+
<path d="M1892.78 203.625C1890.01 212.692 1885.46 219.741 1879.05 224.771C1870.61 231.398 1858.72 234.711 1843.35 234.711C1826.48 234.711 1813.83 229.922 1805.46 220.313C1797.09 210.704 1792.66 195.944 1792.18 176.004H1928.78C1928.9 174.377 1928.96 171.847 1928.96 168.443C1928.96 156.966 1927.6 145.881 1924.92 135.218C1922.21 124.555 1918.48 115.338 1913.66 107.596C1906.4 96.1198 1896.49 87.294 1883.96 81.1491C1871.43 75.0042 1857.87 71.9016 1843.35 71.9016C1830.34 71.9016 1818.44 74.1909 1807.66 78.7694C1796.87 83.348 1787.57 90.0652 1779.71 98.9814C1772.2 107.295 1766.54 117.175 1762.75 128.591C1758.92 140.008 1757.02 152.478 1757.02 165.943C1757.02 195.131 1764.34 217.933 1778.98 234.32C1794.1 251.911 1815.67 260.707 1843.68 260.707C1866.67 260.707 1885.16 255.315 1899.17 244.531C1911.64 234.922 1920.1 221.277 1924.62 203.595H1892.78V203.625ZM1794.92 133.531C1796.27 127.959 1798.41 122.718 1801.33 117.808C1804.95 111.723 1810.46 106.934 1817.87 103.47C1825.25 100.006 1833.86 98.2886 1843.71 98.2886C1853.56 98.2886 1861.91 99.8549 1868.78 102.957C1875.64 106.06 1881.34 111.03 1885.92 117.808C1888.72 121.904 1891.01 126.965 1892.78 133.019C1894.56 139.044 1895.49 144.646 1895.58 149.797H1792.75C1792.87 144.526 1793.59 139.104 1794.95 133.531H1794.92Z" fill="#592FEA"/>
|
|
10
|
+
<path d="M1995.29 105.066H2073.34V71.9314H2039.75C1996.89 71.9314 1962.15 106.662 1962.15 149.526V256.339H1995.23L1995.32 105.066H1995.29Z" fill="#592FEA"/>
|
|
11
|
+
<path d="M1176.36 104.223H1144.01V256.339H1176.36V104.223Z" fill="#592FEA"/>
|
|
12
|
+
<path d="M1223.86 71.9016H1176.36V104.253H1267.27V182.51V256.339H1299.62V147.629C1299.62 105.789 1265.7 71.9016 1223.89 71.9016H1223.86Z" fill="#592FEA"/>
|
|
13
|
+
<path d="M1578.34 231.308H1557.37V131.332C1557.37 111.753 1551.62 96.9632 1540.14 86.9325C1528.67 76.9019 1511.59 71.9016 1489 71.9016C1477.52 71.9016 1467.07 73.0462 1457.7 75.3355C1448.33 77.6248 1440.41 80.9382 1433.96 85.2758C1426.95 89.9749 1421.55 96.2403 1417.79 104.102C1414.44 111.06 1412.49 119.193 1411.82 128.471H1443.75C1444.87 118.862 1448.12 111.572 1453.57 106.753C1460.44 100.668 1471.65 97.5958 1487.25 97.5958C1495.11 97.5958 1501.89 98.65 1507.64 100.759C1513.4 102.867 1517.49 105.729 1519.93 109.373C1522.52 112.898 1524.18 117.024 1524.93 121.754C1525.69 126.513 1526.08 133.321 1526.08 142.237L1466.65 153.653C1455.05 155.882 1444.87 158.834 1436.13 162.539C1427.4 166.244 1420.11 171.847 1414.23 179.347C1408.36 186.847 1405.44 196.697 1405.44 208.867C1405.44 220.012 1408.27 229.5 1413.96 237.362C1419.66 245.224 1427.1 251.068 1436.28 254.954C1445.47 258.809 1455.35 260.737 1465.89 260.737C1496.14 260.737 1516.35 250.375 1526.56 229.621C1528.07 248.387 1538.82 257.755 1558.73 257.755C1566.11 257.755 1572.86 257.273 1578.94 256.339L1578.28 231.338L1578.34 231.308ZM1526.08 188.293C1526.08 202.119 1521.56 213.325 1512.55 221.879C1503.52 230.434 1489.69 234.711 1471.07 234.711C1461.83 234.711 1454.09 232.362 1447.88 227.693C1441.68 222.994 1438.57 216.608 1438.57 208.535C1438.57 201.969 1439.96 196.728 1442.7 192.812C1445.44 188.896 1449.15 185.884 1453.78 183.835C1458.42 181.787 1464.42 179.95 1471.8 178.293L1526.11 166.334V188.293H1526.08Z" fill="#592FEA"/>
|
|
14
|
+
</svg>
|
package/tsconfig.json
CHANGED
package/static/logo.png
DELETED
|
Binary file
|