modelfusion 0.131.0 → 0.132.0
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
- package/CHANGELOG.md +13 -0
- package/README.md +3 -2
- package/core/getRun.cjs +2 -8
- package/core/getRun.d.ts +1 -1
- package/core/getRun.js +2 -8
- package/core/schema/ZodSchema.d.ts +1 -1
- package/model-function/generate-object/ObjectStream.cjs +14 -0
- package/model-function/generate-object/ObjectStream.d.ts +14 -0
- package/model-function/generate-object/ObjectStream.js +14 -0
- package/model-provider/openai/OpenAIChatModel.cjs +15 -0
- package/model-provider/openai/OpenAIChatModel.d.ts +15 -0
- package/model-provider/openai/OpenAIChatModel.js +15 -0
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +10 -0
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +12 -2
- package/model-provider/openai/OpenAITextEmbeddingModel.js +10 -0
- package/model-provider/openai/TikTokenTokenizer.cjs +5 -0
- package/model-provider/openai/TikTokenTokenizer.js +5 -0
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,18 @@
 # Changelog
 
+## v0.132.0 - 2024-02-15
+
+### Added
+
+- Support for OpenAI `text-embedding-3-small` and `text-embedding-3-large` embedding models.
+- Support for OpenAI `gpt-4-turbo-preview`, `gpt-4-0125-preview`, and `gpt-3.5-turbo-0125` chat models.
+
+## v0.131.1 - 2024-01-25
+
+### Fixed
+
+- Add `type-fest` as dependency to fix type inference errors.
+
 ## v0.131.0 - 2024-01-23
 
 ### Added
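For orientation, the new models are plain model-name strings on the existing OpenAI integration. A minimal usage sketch for one of the new chat models — assuming the `generateText` helper, the `openai.ChatTextGenerator` facade, and the `.withTextPrompt()` mapping from the ModelFusion README (names recalled from the public API, not shown in this diff):

```ts
import { generateText, openai } from "modelfusion";

// "gpt-4-turbo-preview" is one of the chat models added in v0.132.0.
const text = await generateText({
  model: openai
    .ChatTextGenerator({ model: "gpt-4-turbo-preview" })
    .withTextPrompt(),
  prompt: "Summarize this release in one sentence.",
});
```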
package/README.md
CHANGED
@@ -7,6 +7,7 @@
 [](https://modelfusion.dev)
 [](https://discord.gg/GqCwYZATem)
 [](https://twitter.com/lgrammel)
+[](https://twitter.com/modelfusionjs)
 
 [Introduction](#introduction) | [Quick Install](#quick-install) | [Usage](#usage-examples) | [Documentation](#documentation) | [Examples](#more-examples) | [Contributing](#contributing) | [modelfusion.dev](https://modelfusion.dev)
 
@@ -30,7 +31,7 @@
 npm install modelfusion
 ```
 
-Or use a
+Or use a starter template:
 
 - [ModelFusion terminal app starter](https://github.com/lgrammel/modelfusion-terminal-app-starter)
 - [Next.js, Vercel AI SDK, Llama.cpp & ModelFusion starter](https://github.com/lgrammel/modelfusion-llamacpp-nextjs-starter)
@@ -578,7 +579,7 @@ const text = await generateText({
 - [Generate object](https://modelfusion.dev/guide/function/generate-object)
 - [Generate image](https://modelfusion.dev/guide/function/generate-image)
 - [Generate speech](https://modelfusion.dev/guide/function/generate-speech)
-- [Generate transcription](https://modelfusion.dev/guide/function/
+- [Generate transcription](https://modelfusion.dev/guide/function/generate-transcription)
 - [Tokenize Text](https://modelfusion.dev/guide/function/tokenize-text)
 - [Embed Value](https://modelfusion.dev/guide/function/embed)
 - [Classify Value](https://modelfusion.dev/guide/function/classify)
package/core/getRun.cjs
CHANGED
@@ -24,16 +24,10 @@ var __importStar = (this && this.__importStar) || function (mod) {
 };
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.withRun = exports.getRun = void 0;
+const detectRuntime_js_1 = require("../util/detectRuntime.cjs");
 let runStorage;
 async function ensureLoaded() {
-
-    const versions = "versions";
-    const isNode = typeof process !== "undefined" &&
-        process[versions] != null &&
-        process[versions].node != null;
-    if (!isNode)
-        return Promise.resolve();
-    if (!runStorage) {
+    if ((0, detectRuntime_js_1.detectRuntime)() === "node" && !runStorage) {
         // Note: using "async_hooks" instead of "node:async_hooks" to avoid webpack fallback problems.
         const { AsyncLocalStorage } = await Promise.resolve().then(() => __importStar(require("async_hooks")));
         runStorage = new AsyncLocalStorage();
package/core/getRun.d.ts
CHANGED
package/core/getRun.js
CHANGED
@@ -1,13 +1,7 @@
+import { detectRuntime } from "../util/detectRuntime.js";
 let runStorage;
 async function ensureLoaded() {
-
-    const versions = "versions";
-    const isNode = typeof process !== "undefined" &&
-        process[versions] != null &&
-        process[versions].node != null;
-    if (!isNode)
-        return Promise.resolve();
-    if (!runStorage) {
+    if (detectRuntime() === "node" && !runStorage) {
         // Note: using "async_hooks" instead of "node:async_hooks" to avoid webpack fallback problems.
         const { AsyncLocalStorage } = await import("async_hooks");
         runStorage = new AsyncLocalStorage();
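Both module formats replace the inline Node.js check with a shared `detectRuntime` helper from `../util/detectRuntime`. That helper's implementation is not part of this diff; a plausible sketch based on the inline check it replaces (only the `"node"` branch is implied by the diff — the `"browser"` and `"unknown"` branches are assumptions):

```ts
// Hypothetical sketch of a runtime-detection helper, not the library's actual code.
export function detectRuntime(): "node" | "browser" | "unknown" {
  // Mirrors the removed inline check: Node.js exposes process.versions.node.
  if (typeof process !== "undefined" && process.versions?.node != null) {
    return "node";
  }
  // Browser-like runtimes expose a global window with a document.
  if (typeof window !== "undefined" && typeof window.document !== "undefined") {
    return "browser";
  }
  return "unknown";
}
```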
package/core/schema/ZodSchema.d.ts
CHANGED
@@ -1,7 +1,7 @@
+import { PartialDeep } from "type-fest";
 import { z } from "zod";
 import { JsonSchemaProducer } from "./JsonSchemaProducer.js";
 import { Schema } from "./Schema.js";
-import { PartialDeep } from "type-fest";
 export declare function zodSchema<OBJECT>(zodSchema: z.Schema<OBJECT>): ZodSchema<OBJECT>;
 export declare class ZodSchema<OBJECT> implements Schema<OBJECT>, JsonSchemaProducer {
     readonly zodSchema: z.Schema<OBJECT>;
package/model-function/generate-object/ObjectStream.cjs
CHANGED
@@ -2,6 +2,14 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.ObjectStreamFromResponse = exports.ObjectStreamResponse = void 0;
 const parsePartialJson_js_1 = require("../../util/parsePartialJson.cjs");
+/**
+ * Response for ObjectStream. The object stream is encoded as a text stream.
+ *
+ * Example:
+ * ```ts
+ * return new ObjectStreamResponse(objectStream);
+ * ```
+ */
 class ObjectStreamResponse extends Response {
     constructor(stream, init) {
         super(ObjectStreamToTextStream(stream), {
@@ -12,6 +20,12 @@ class ObjectStreamResponse extends Response {
     }
 }
 exports.ObjectStreamResponse = ObjectStreamResponse;
+/**
+ * Convert a Response to a lightweight ObjectStream. The response must be created
+ * using ObjectStreamResponse on the server.
+ *
+ * @see ObjectStreamResponse
+ */
 async function* ObjectStreamFromResponse({ response, }) {
     let text = "";
     const reader = response.body.getReader();
package/model-function/generate-object/ObjectStream.d.ts
CHANGED
@@ -7,9 +7,23 @@ export type ObjectStream<OBJECT> = AsyncIterable<{
     partialText: string;
     textDelta: string;
 }>;
+/**
+ * Response for ObjectStream. The object stream is encoded as a text stream.
+ *
+ * Example:
+ * ```ts
+ * return new ObjectStreamResponse(objectStream);
+ * ```
+ */
 export declare class ObjectStreamResponse extends Response {
     constructor(stream: ObjectStream<unknown>, init?: ResponseInit);
 }
+/**
+ * Convert a Response to a lightweight ObjectStream. The response must be created
+ * using ObjectStreamResponse on the server.
+ *
+ * @see ObjectStreamResponse
+ */
 export declare function ObjectStreamFromResponse<OBJECT>({ response, }: {
     schema: Schema<OBJECT>;
     response: Response;
package/model-function/generate-object/ObjectStream.js
CHANGED
@@ -1,4 +1,12 @@
 import { parsePartialJson } from "../../util/parsePartialJson.js";
+/**
+ * Response for ObjectStream. The object stream is encoded as a text stream.
+ *
+ * Example:
+ * ```ts
+ * return new ObjectStreamResponse(objectStream);
+ * ```
+ */
 export class ObjectStreamResponse extends Response {
     constructor(stream, init) {
         super(ObjectStreamToTextStream(stream), {
@@ -8,6 +16,12 @@ export class ObjectStreamResponse extends Response {
         });
     }
 }
+/**
+ * Convert a Response to a lightweight ObjectStream. The response must be created
+ * using ObjectStreamResponse on the server.
+ *
+ * @see ObjectStreamResponse
+ */
 export async function* ObjectStreamFromResponse({ response, }) {
     let text = "";
     const reader = response.body.getReader();
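The new JSDoc documents a server/client round trip: the server wraps an `ObjectStream` in an `ObjectStreamResponse`, and the client decodes the fetched `Response` with `ObjectStreamFromResponse`. A client-side sketch, assuming both functions and `zodSchema` are exported from the package root; the route and schema shape are hypothetical placeholders:

```ts
import { ObjectStreamFromResponse, zodSchema } from "modelfusion";
import { z } from "zod";

// Hypothetical schema matching whatever object the server streams.
const itinerarySchema = zodSchema(
  z.object({ days: z.array(z.object({ activities: z.array(z.string()) })) })
);

const response = await fetch("/api/generate-itinerary", { method: "POST" });

// The response must have been created with ObjectStreamResponse on the server
// (see the JSDoc above).
for await (const part of ObjectStreamFromResponse({
  schema: itinerarySchema,
  response,
})) {
  console.log(part.partialText); // accumulated JSON text so far, per the ObjectStream type
}
```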
package/model-provider/openai/OpenAIChatModel.cjs
CHANGED
@@ -33,11 +33,21 @@ exports.OPENAI_CHAT_MODELS = {
         fineTunedPromptTokenCostInMillicents: null,
         fineTunedCompletionTokenCostInMillicents: null,
     },
+    "gpt-4-turbo-preview": {
+        contextWindowSize: 128000,
+        promptTokenCostInMillicents: 1,
+        completionTokenCostInMillicents: 3,
+    },
     "gpt-4-1106-preview": {
         contextWindowSize: 128000,
         promptTokenCostInMillicents: 1,
         completionTokenCostInMillicents: 3,
     },
+    "gpt-4-0125-preview": {
+        contextWindowSize: 128000,
+        promptTokenCostInMillicents: 1,
+        completionTokenCostInMillicents: 3,
+    },
     "gpt-4-vision-preview": {
         contextWindowSize: 128000,
         promptTokenCostInMillicents: 1,
@@ -65,6 +75,11 @@ exports.OPENAI_CHAT_MODELS = {
         fineTunedPromptTokenCostInMillicents: 0.3,
         fineTunedCompletionTokenCostInMillicents: 0.6,
     },
+    "gpt-3.5-turbo-0125": {
+        contextWindowSize: 16385,
+        promptTokenCostInMillicents: 0.05,
+        completionTokenCostInMillicents: 0.15,
+    },
     "gpt-3.5-turbo-1106": {
         contextWindowSize: 16385,
         promptTokenCostInMillicents: 0.1,
package/model-provider/openai/OpenAIChatModel.d.ts
CHANGED
@@ -26,11 +26,21 @@ export declare const OPENAI_CHAT_MODELS: {
         fineTunedPromptTokenCostInMillicents: null;
         fineTunedCompletionTokenCostInMillicents: null;
     };
+    "gpt-4-turbo-preview": {
+        contextWindowSize: number;
+        promptTokenCostInMillicents: number;
+        completionTokenCostInMillicents: number;
+    };
     "gpt-4-1106-preview": {
         contextWindowSize: number;
         promptTokenCostInMillicents: number;
         completionTokenCostInMillicents: number;
     };
+    "gpt-4-0125-preview": {
+        contextWindowSize: number;
+        promptTokenCostInMillicents: number;
+        completionTokenCostInMillicents: number;
+    };
     "gpt-4-vision-preview": {
         contextWindowSize: number;
         promptTokenCostInMillicents: number;
@@ -58,6 +68,11 @@ export declare const OPENAI_CHAT_MODELS: {
         fineTunedPromptTokenCostInMillicents: number;
         fineTunedCompletionTokenCostInMillicents: number;
     };
+    "gpt-3.5-turbo-0125": {
+        contextWindowSize: number;
+        promptTokenCostInMillicents: number;
+        completionTokenCostInMillicents: number;
+    };
     "gpt-3.5-turbo-1106": {
         contextWindowSize: number;
         promptTokenCostInMillicents: number;
package/model-provider/openai/OpenAIChatModel.js
CHANGED
@@ -30,11 +30,21 @@ export const OPENAI_CHAT_MODELS = {
         fineTunedPromptTokenCostInMillicents: null,
         fineTunedCompletionTokenCostInMillicents: null,
     },
+    "gpt-4-turbo-preview": {
+        contextWindowSize: 128000,
+        promptTokenCostInMillicents: 1,
+        completionTokenCostInMillicents: 3,
+    },
     "gpt-4-1106-preview": {
         contextWindowSize: 128000,
         promptTokenCostInMillicents: 1,
         completionTokenCostInMillicents: 3,
     },
+    "gpt-4-0125-preview": {
+        contextWindowSize: 128000,
+        promptTokenCostInMillicents: 1,
+        completionTokenCostInMillicents: 3,
+    },
     "gpt-4-vision-preview": {
         contextWindowSize: 128000,
         promptTokenCostInMillicents: 1,
@@ -62,6 +72,11 @@ export const OPENAI_CHAT_MODELS = {
         fineTunedPromptTokenCostInMillicents: 0.3,
         fineTunedCompletionTokenCostInMillicents: 0.6,
     },
+    "gpt-3.5-turbo-0125": {
+        contextWindowSize: 16385,
+        promptTokenCostInMillicents: 0.05,
+        completionTokenCostInMillicents: 0.15,
+    },
     "gpt-3.5-turbo-1106": {
         contextWindowSize: 16385,
         promptTokenCostInMillicents: 0.1,
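All costs in these tables are millicents (thousandths of a cent) per token: `promptTokenCostInMillicents: 1` for `gpt-4-turbo-preview` works out to $0.01 per 1K prompt tokens, and the 0.05/0.15 entries for `gpt-3.5-turbo-0125` to $0.0005/$0.0015 per 1K. A worked example of the conversion (plain arithmetic, not library code):

```ts
// gpt-4-turbo-preview: 1 millicent per prompt token, 3 per completion token.
const promptTokens = 1200;
const completionTokens = 400;

const costInMillicents = promptTokens * 1 + completionTokens * 3; // 2400 millicents

// 1000 millicents = 1 cent; 100 cents = 1 dollar.
const costInDollars = costInMillicents / 1000 / 100;
console.log(costInDollars); // 0.024
```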
package/model-provider/openai/OpenAITextEmbeddingModel.cjs
CHANGED
@@ -5,6 +5,16 @@ const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
 const AbstractOpenAITextEmbeddingModel_js_1 = require("./AbstractOpenAITextEmbeddingModel.cjs");
 const TikTokenTokenizer_js_1 = require("./TikTokenTokenizer.cjs");
 exports.OPENAI_TEXT_EMBEDDING_MODELS = {
+    "text-embedding-3-small": {
+        contextWindowSize: 8192,
+        embeddingDimensions: 1536,
+        tokenCostInMillicents: 0.002,
+    },
+    "text-embedding-3-large": {
+        contextWindowSize: 8192,
+        embeddingDimensions: 3072,
+        tokenCostInMillicents: 0.013,
+    },
     "text-embedding-ada-002": {
         contextWindowSize: 8192,
         embeddingDimensions: 1536,
package/model-provider/openai/OpenAITextEmbeddingModel.d.ts
CHANGED
@@ -2,6 +2,16 @@ import { EmbeddingModel } from "../../model-function/embed/EmbeddingModel.js";
 import { AbstractOpenAITextEmbeddingModel, AbstractOpenAITextEmbeddingModelSettings, OpenAITextEmbeddingResponse } from "./AbstractOpenAITextEmbeddingModel.js";
 import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
 export declare const OPENAI_TEXT_EMBEDDING_MODELS: {
+    "text-embedding-3-small": {
+        contextWindowSize: number;
+        embeddingDimensions: number;
+        tokenCostInMillicents: number;
+    };
+    "text-embedding-3-large": {
+        contextWindowSize: number;
+        embeddingDimensions: number;
+        tokenCostInMillicents: number;
+    };
     "text-embedding-ada-002": {
         contextWindowSize: number;
         embeddingDimensions: number;
@@ -9,7 +19,7 @@ export declare const OPENAI_TEXT_EMBEDDING_MODELS: {
     };
 };
 export type OpenAITextEmbeddingModelType = keyof typeof OPENAI_TEXT_EMBEDDING_MODELS;
-export declare const isOpenAIEmbeddingModel: (model: string) => model is "text-embedding-ada-002";
+export declare const isOpenAIEmbeddingModel: (model: string) => model is "text-embedding-3-small" | "text-embedding-3-large" | "text-embedding-ada-002";
 export declare const calculateOpenAIEmbeddingCostInMillicents: ({ model, responses, }: {
     model: OpenAITextEmbeddingModelType;
     responses: OpenAITextEmbeddingResponse[];
@@ -34,7 +44,7 @@ export interface OpenAITextEmbeddingModelSettings extends AbstractOpenAITextEmbeddingModelSettings {
 export declare class OpenAITextEmbeddingModel extends AbstractOpenAITextEmbeddingModel<OpenAITextEmbeddingModelSettings> implements EmbeddingModel<string, OpenAITextEmbeddingModelSettings> {
     constructor(settings: OpenAITextEmbeddingModelSettings);
     readonly provider: "openai";
-    get modelName(): "text-embedding-ada-002";
+    get modelName(): "text-embedding-3-small" | "text-embedding-3-large" | "text-embedding-ada-002";
     readonly embeddingDimensions: number;
     readonly tokenizer: TikTokenTokenizer;
     readonly contextWindowSize: number;
package/model-provider/openai/OpenAITextEmbeddingModel.js
CHANGED
@@ -2,6 +2,16 @@ import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
 import { AbstractOpenAITextEmbeddingModel, } from "./AbstractOpenAITextEmbeddingModel.js";
 import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
 export const OPENAI_TEXT_EMBEDDING_MODELS = {
+    "text-embedding-3-small": {
+        contextWindowSize: 8192,
+        embeddingDimensions: 1536,
+        tokenCostInMillicents: 0.002,
+    },
+    "text-embedding-3-large": {
+        contextWindowSize: 8192,
+        embeddingDimensions: 3072,
+        tokenCostInMillicents: 0.013,
+    },
     "text-embedding-ada-002": {
         contextWindowSize: 8192,
         embeddingDimensions: 1536,
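Both new embedding entries share the 8192-token context window and differ in output width (1536 vs. 3072 dimensions) and per-token cost. A minimal selection sketch, assuming the `embed` helper and `openai.TextEmbedder` facade from the package's public API (neither is shown in this diff):

```ts
import { embed, openai } from "modelfusion";

// "text-embedding-3-large" produces 3072-dimensional vectors per the table above.
const embedding = await embed({
  model: openai.TextEmbedder({ model: "text-embedding-3-large" }),
  value: "a sunny day at the beach",
});
```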
package/model-provider/openai/TikTokenTokenizer.cjs
CHANGED
@@ -58,17 +58,22 @@ function getTiktokenBPE(model) {
         case "gpt-3.5-turbo-0301":
         case "gpt-3.5-turbo-0613":
         case "gpt-3.5-turbo-1106":
+        case "gpt-3.5-turbo-0125":
         case "gpt-3.5-turbo-16k":
         case "gpt-3.5-turbo-16k-0613":
         case "gpt-3.5-turbo-instruct":
         case "gpt-4":
         case "gpt-4-0314":
         case "gpt-4-0613":
+        case "gpt-4-turbo-preview":
         case "gpt-4-1106-preview":
+        case "gpt-4-0125-preview":
         case "gpt-4-vision-preview":
         case "gpt-4-32k":
         case "gpt-4-32k-0314":
         case "gpt-4-32k-0613":
+        case "text-embedding-3-small":
+        case "text-embedding-3-large":
         case "text-embedding-ada-002": {
             return cl100k_base_1.default;
         }
package/model-provider/openai/TikTokenTokenizer.js
CHANGED
@@ -51,17 +51,22 @@ function getTiktokenBPE(model) {
         case "gpt-3.5-turbo-0301":
         case "gpt-3.5-turbo-0613":
         case "gpt-3.5-turbo-1106":
+        case "gpt-3.5-turbo-0125":
         case "gpt-3.5-turbo-16k":
         case "gpt-3.5-turbo-16k-0613":
         case "gpt-3.5-turbo-instruct":
         case "gpt-4":
         case "gpt-4-0314":
         case "gpt-4-0613":
+        case "gpt-4-turbo-preview":
         case "gpt-4-1106-preview":
+        case "gpt-4-0125-preview":
         case "gpt-4-vision-preview":
         case "gpt-4-32k":
         case "gpt-4-32k-0314":
         case "gpt-4-32k-0613":
+        case "text-embedding-3-small":
+        case "text-embedding-3-large":
         case "text-embedding-ada-002": {
             return cl100k_base;
         }
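Every model name added in this release resolves to the same `cl100k_base` encoding as its predecessors, so tokenization behavior is unchanged; the switch statement only needed the new case labels. A counting sketch, assuming a `countTokens` helper and an `openai.Tokenizer` facade exist on the package root (both are assumptions about the public API, not shown in this diff):

```ts
import { countTokens, openai } from "modelfusion";

// The new model names now resolve to cl100k_base inside getTiktokenBPE.
const tokenizer = openai.Tokenizer({ model: "gpt-4-turbo-preview" });
const tokenCount = await countTokens(tokenizer, "Hello, world!");
console.log(tokenCount);
```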
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelfusion",
   "description": "The TypeScript library for building AI applications.",
-  "version": "0.131.0",
+  "version": "0.132.0",
   "author": "Lars Grammel",
   "license": "MIT",
   "keywords": [
@@ -67,6 +67,7 @@
     "js-tiktoken": "1.0.7",
     "nanoid": "3.3.6",
     "secure-json-parse": "2.7.0",
+    "type-fest": "4.9.0",
     "ws": "8.14.2",
     "zod": "3.22.4",
     "zod-to-json-schema": "3.22.3"
@@ -80,7 +81,6 @@
     "@vitest/ui": "1.1.0",
     "eslint": "^8.45.0",
     "eslint-config-prettier": "9.1.0",
-    "msw": "2.1.2",
-    "type-fest": "4.9.0"
+    "msw": "2.1.2"
   }
 }
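The dependency shuffle here is the mechanical half of the v0.131.1 changelog entry above: `type-fest` leaves `devDependencies` and becomes a runtime dependency because `ZodSchema.d.ts` now surfaces its `PartialDeep` type to consumers, which is what produced the type inference errors the changelog mentions.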