nuxt-chatgpt 0.2.1 → 0.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +88 -29
- package/dist/module.d.ts +14 -14
- package/dist/module.json +1 -1
- package/dist/runtime/composables/useChatgpt.d.ts +2 -2
- package/dist/runtime/composables/useChatgpt.mjs +2 -2
- package/dist/runtime/constants/models.d.ts +12 -8
- package/dist/runtime/constants/models.mjs +5 -1
- package/dist/runtime/constants/options.d.ts +7 -7
- package/dist/runtime/server/api/chat-completion.d.ts +3 -2
- package/dist/runtime/server/api/chat-completion.mjs +3 -3
- package/dist/runtime/server/api/chat.d.ts +2 -2
- package/dist/runtime/server/api/chat.mjs +5 -4
- package/dist/runtime/utils/model-map.d.ts +8 -4
- package/dist/runtime/utils/model-map.mjs +10 -2
- package/package.json +12 -12
package/README.md
CHANGED

@@ -60,26 +60,27 @@ To access the `chat`, and `chatCompletion` methods in the nuxt-chatgpt module, y
 
 | Name | Type | Default | Description |
 |--|--|--|--|
-|**message**|`String
+|**message**|`String`|available only for `chat()`|A string representing the text message that you want to send to the GPT model for processing.
+|**messages**|`Array`|available only for `chatCompletion()`|An array of objects that contains `role` and `content`
 |**model**|`String`|`text-davinci-003` for `chat()` and `gpt-3.5-turbo` for `chatCompletion()`|Represent certain model for different types of natural language processing tasks.
 |**options**|`Object`|`{ temperature: 0.5, max_tokens: 2048, top_p: 1 frequency_penalty: 0, presence_penalty: 0 }`|An optional object that specifies any additional options you want to pass to the API request, such as the number of responses to generate, and the maximum length of each response.
 
-Available models
+Available models:
 
-- text-davinci-003
 - text-davinci-002
-
-Available models for `chatCompletion`
-
+- text-davinci-003
 - gpt-3.5-turbo
 - gpt-3.5-turbo-0301
-
-You need to join waitlist to use gpt-4 models within `chatCompletion` method
-
+- gpt-3.5-turbo-1106
 - gpt-4
+- gpt-4-1106-preview
 - gpt-4-0314
+- gpt-4-0613
 - gpt-4-32k
 - gpt-4-32k-0314
+- gpt-4-32k-0613
+
+You need to join waitlist to use gpt-4 models within `chatCompletion` method
 
 ### Simple `chat` usage
 In the following example, the model is unspecified, and the text-davinci-003 model will be used by default.

@@ -88,11 +89,15 @@ In the following example, the model is unspecified, and the text-davinci-003 mod
 const { chat } = useChatgpt()
 
 const data = ref('')
-const
+const inputData = ref('')
 
 async function sendMessage() {
-
-
+  try {
+    const response = await chat(inputData.value)
+    data.value = response
+  } catch(error) {
+    alert(`Join the waiting list if you want to use GPT-4 models: ${error}`)
+  }
 }
 
 ```

@@ -100,7 +105,7 @@ async function sendMessage() {
 ```html
 <template>
   <div>
-    <input v-model="
+    <input v-model="inputData">
     <button
      @click="sendMessage"
      v-text="'Send'"

@@ -116,11 +121,15 @@ async function sendMessage() {
 const { chat } = useChatgpt()
 
 const data = ref('')
-const
+const inputData = ref('')
 
 async function sendMessage() {
-
-
+  try {
+    const response = await chat(inputData.value, 'gpt-3.5-turbo')
+    data.value = response
+  } catch(error) {
+    alert(`Join the waiting list if you want to use GPT-4 models: ${error}`)
+  }
 }
 
 ```

@@ -128,7 +137,7 @@ async function sendMessage() {
 ```html
 <template>
   <div>
-    <input v-model="
+    <input v-model="inputData">
     <button
      @click="sendMessage"
      v-text="'Send'"

@@ -144,12 +153,29 @@ In the following example, the model is unspecified, and the gpt-3.5-turbo model
 ```js
 const { chatCompletion } = useChatgpt()
 
-const
-const
+const chatTree = ref([])
+const inputData = ref('')
 
 async function sendMessage() {
-
-
+  try {
+    const message = {
+      role: 'user',
+      content: `${inputData.value}`,
+    }
+
+    chatTree.value.push(message)
+
+    const response = await chatCompletion(chatTree.value)
+
+    const responseMessage = {
+      role: response[0].message.role,
+      content: response[0].message.content
+    }
+
+    chatTree.value.push(responseMessage)
+  } catch(error) {
+    alert(`Join the waiting list if you want to use GPT-4 models: ${error}`)
+  }
 }
 
 ```

@@ -157,12 +183,20 @@ async function sendMessage() {
 ```html
 <template>
   <div>
-    <input v-model="
+    <input v-model="inputData">
     <button
      @click="sendMessage"
      v-text="'Send'"
    />
-    <div>
+    <div>
+      <div
+        v-for="chat in chatTree"
+        :key="chat"
+      >
+        <strong>{{ chat.role }} :</strong>
+        <div>{{ chat.content }} </div>
+      </div>
+    </div>
   </div>
 </template>
 ```

@@ -172,12 +206,29 @@ async function sendMessage() {
 ```js
 const { chatCompletion } = useChatgpt()
 
-const
-const
+const chatTree = ref([])
+const inputData = ref('')
 
 async function sendMessage() {
-
-
+  try {
+    const message = {
+      role: 'user',
+      content: `${inputData.value}`,
+    }
+
+    chatTree.value.push(message)
+
+    const response = await chatCompletion(chatTree.value, 'gpt-3.5-turbo-0301')
+
+    const responseMessage = {
+      role: response[0].message.role,
+      content: response[0].message.content
+    }
+
+    chatTree.value.push(responseMessage)
+  } catch(error) {
+    alert(`Join the waiting list if you want to use GPT-4 models: ${error}`)
+  }
 }
 
 ```

@@ -185,12 +236,20 @@ async function sendMessage() {
 ```html
 <template>
   <div>
-    <input v-model="
+    <input v-model="inputData">
     <button
      @click="sendMessage"
      v-text="'Send'"
    />
-    <div>
+    <div>
+      <div
+        v-for="chat in chatTree"
+        :key="chat"
+      >
+        <strong>{{ chat.role }} :</strong>
+        <div>{{ chat.content }} </div>
+      </div>
+    </div>
   </div>
 </template>
 ```
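The updated README documents an `options` parameter in the table above but never exercises it in an example. A minimal sketch of doing so, assuming the `chatCompletion(messages, model, options)` signature shown in `dist/runtime/composables/useChatgpt.mjs` below and the auto-imported composable:

```js
// Sketch only: exercising the options parameter of chatCompletion().
// Signature (messages, model, options) is taken from
// dist/runtime/composables/useChatgpt.mjs in this diff.
const { chatCompletion } = useChatgpt()

async function summarize() {
  const messages = [{ role: 'user', content: 'Summarize Nuxt 3 in one sentence.' }]

  // Note: a supplied options object replaces defaultOptions entirely,
  // since the server handler spreads `...options || defaultOptions`.
  const response = await chatCompletion(messages, 'gpt-3.5-turbo-1106', {
    temperature: 0.2,
    max_tokens: 256,
    top_p: 1,
    frequency_penalty: 0,
    presence_penalty: 0,
  })

  return response[0].message.content
}
```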
package/dist/module.d.ts
CHANGED

@@ -1,19 +1,19 @@
 import * as _nuxt_schema from '@nuxt/schema';
 
-interface ModuleOptions {
-    /**
-     * Set chatGPT apiKey
-     * @default undefined
-     * @description Set your chatGPT apiKey
-     */
-    apiKey?: string;
-    /**
-     * Setting to `false` disables the module.
-     * @default true
-     * @description Use this setting to disable the module.
-     */
-    isEnabled?: boolean;
-}
+interface ModuleOptions {
+    /**
+     * Set chatGPT apiKey
+     * @default undefined
+     * @description Set your chatGPT apiKey
+     */
+    apiKey?: string;
+    /**
+     * Setting to `false` disables the module.
+     * @default true
+     * @description Use this setting to disable the module.
+     */
+    isEnabled?: boolean;
+}
 declare const _default: _nuxt_schema.NuxtModule<ModuleOptions>;
 
 export { ModuleOptions, _default as default };
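These `ModuleOptions` are what a consumer sets in `nuxt.config`. A hypothetical registration sketch; the `chatgpt` config key is an assumption inferred from `useRuntimeConfig().chatgpt.apiKey` in the server handlers further down this diff, not something the diff states directly:

```js
// nuxt.config.ts — illustrative only; the 'chatgpt' key name is assumed.
export default defineNuxtConfig({
  modules: ['nuxt-chatgpt'],
  chatgpt: {
    apiKey: process.env.OPENAI_API_KEY, // @default undefined
    isEnabled: true, // set to false to disable the module
  },
})
```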
package/dist/module.json
CHANGED

package/dist/runtime/composables/useChatgpt.d.ts
CHANGED

@@ -1,2 +1,2 @@
-import type { IChatgptClient } from "../types";
-export declare const useChatgpt: () => IChatgptClient;
+import type { IChatgptClient } from "../types";
+export declare const useChatgpt: () => IChatgptClient;
package/dist/runtime/composables/useChatgpt.mjs
CHANGED

@@ -17,12 +17,12 @@ export const useChatgpt = () => {
       });
     }
   };
-  const chatCompletion = async (
+  const chatCompletion = async (messages, model, options) => {
    try {
      return await $fetch("/api/chat-completion", {
        method: "POST",
        body: {
-
+          messages,
          model,
          options
        }
package/dist/runtime/constants/models.d.ts
CHANGED

@@ -1,8 +1,12 @@
-export declare const
-export declare const
-export declare const MODEL_GPT_TURBO_3_5: string;
-export declare const MODEL_GPT_TURBO_3_5_0301: string;
-export declare const
-export declare const
-export declare const
-export declare const
+export declare const MODEL_TEXT_DAVINCI_002: string;
+export declare const MODEL_TEXT_DAVINCI_003: string;
+export declare const MODEL_GPT_TURBO_3_5: string;
+export declare const MODEL_GPT_TURBO_3_5_0301: string;
+export declare const MODEL_GPT_TURBO_3_5_1106 = "gpt-3.5-turbo-1106";
+export declare const MODEL_GPT_4: string;
+export declare const MODEL_GPT_4_1106_PREVIEW = "gpt-4-1106-preview";
+export declare const MODEL_GPT_4_0314: string;
+export declare const MODEL_GPT_4_0613 = "gpt-4-0613";
+export declare const MODEL_GPT_4_32k: string;
+export declare const MODEL_GPT_4_32k_0314: string;
+export declare const MODEL_GPT_4_32k_0613 = "gpt-4-32k-0613";
package/dist/runtime/constants/models.mjs
CHANGED

@@ -1,8 +1,12 @@
-export const MODEL_TEXT_DAVINCI_003 = "text-davinci-003";
 export const MODEL_TEXT_DAVINCI_002 = "text-davinci-002";
+export const MODEL_TEXT_DAVINCI_003 = "text-davinci-003";
 export const MODEL_GPT_TURBO_3_5 = "gpt-3.5-turbo";
 export const MODEL_GPT_TURBO_3_5_0301 = "gpt-3.5-turbo-0301";
+export const MODEL_GPT_TURBO_3_5_1106 = "gpt-3.5-turbo-1106";
 export const MODEL_GPT_4 = "gpt-4";
+export const MODEL_GPT_4_1106_PREVIEW = "gpt-4-1106-preview";
 export const MODEL_GPT_4_0314 = "gpt-4-0314";
+export const MODEL_GPT_4_0613 = "gpt-4-0613";
 export const MODEL_GPT_4_32k = "gpt-4-32k";
 export const MODEL_GPT_4_32k_0314 = "gpt-4-32k-0314";
+export const MODEL_GPT_4_32k_0613 = "gpt-4-32k-0613";
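Each constant is just the model-name string itself, so the literal strings listed in the README and these exported constants are interchangeable as the `model` argument. A brief sketch:

```js
// Sketch: 'gpt-4-1106-preview' is the same value as the new
// MODEL_GPT_4_1106_PREVIEW constant exported above.
const { chatCompletion } = useChatgpt()

const reply = await chatCompletion(
  [{ role: 'user', content: 'Hello' }],
  'gpt-4-1106-preview',
)
```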
package/dist/runtime/constants/options.d.ts
CHANGED

@@ -1,7 +1,7 @@
-export declare const defaultOptions: {
-    temperature: number;
-    max_tokens: number;
-    top_p: number;
-    frequency_penalty: number;
-    presence_penalty: number;
-};
+export declare const defaultOptions: {
+    temperature: number;
+    max_tokens: number;
+    top_p: number;
+    frequency_penalty: number;
+    presence_penalty: number;
+};
package/dist/runtime/server/api/chat-completion.d.ts
CHANGED

@@ -1,2 +1,3 @@
-
-
+import OpenAI from 'openai';
+declare const _default: import("h3").EventHandler<import("h3").EventHandlerRequest, Promise<OpenAI.Chat.Completions.ChatCompletion.Choice[]>>;
+export default _default;
package/dist/runtime/server/api/chat-completion.mjs
CHANGED

@@ -5,7 +5,7 @@ import { MODEL_GPT_TURBO_3_5 } from "../../constants/models.mjs";
 import { modelMap } from "../../utils/model-map.mjs";
 import { useRuntimeConfig } from "#imports";
 export default defineEventHandler(async (event) => {
-  const {
+  const { messages, model, options } = await readBody(event);
   if (!useRuntimeConfig().chatgpt.apiKey) {
     throw createError({
       statusCode: 403,

@@ -16,13 +16,13 @@ export default defineEventHandler(async (event) => {
     apiKey: useRuntimeConfig().chatgpt.apiKey
   });
   const requestOptions = {
-    messages
+    messages,
     model: !model ? modelMap[MODEL_GPT_TURBO_3_5] : modelMap[model],
     ...options || defaultOptions
   };
   try {
     const chatCompletion = await openai.chat.completions.create(requestOptions);
-    return chatCompletion.choices
+    return chatCompletion.choices;
   } catch (error) {
     throw createError({
       statusCode: 500,
package/dist/runtime/server/api/chat.d.ts
CHANGED

@@ -1,2 +1,2 @@
-declare const _default: import("h3").EventHandler<string
-export default _default;
+declare const _default: import("h3").EventHandler<import("h3").EventHandlerRequest, Promise<string>>;
+export default _default;
package/dist/runtime/server/api/chat.mjs
CHANGED

@@ -1,6 +1,7 @@
 import OpenAI from "openai";
 import { createError, defineEventHandler, readBody } from "h3";
 import { defaultOptions } from "../../constants/options.mjs";
+import { MODEL_GPT_TURBO_3_5 } from "../../constants/models.mjs";
 import { modelMap } from "../../utils/model-map.mjs";
 import { useRuntimeConfig } from "#imports";
 export default defineEventHandler(async (event) => {

@@ -15,13 +16,13 @@ export default defineEventHandler(async (event) => {
     apiKey: useRuntimeConfig().chatgpt.apiKey
   });
   const requestOptions = {
-
-    model: !model ? modelMap
+    messages: [{ role: "user", content: message }],
+    model: !model ? modelMap[MODEL_GPT_TURBO_3_5] : modelMap[model],
     ...options || defaultOptions
   };
   try {
-    const
-    return
+    const chatCompletion = await openai.chat.completions.create(requestOptions);
+    return chatCompletion.choices[0].message?.content;
   } catch (error) {
     throw createError({
       statusCode: 500,
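The reworked handler wraps the single `message` into a chat-completions `messages` array and now returns only the first choice's text. A sketch of calling this endpoint directly, assuming it is served at `/api/chat` (the route is implied by the file location, not shown in this hunk) and that the handler destructures `message`, `model`, and `options` from the request body:

```js
// Hypothetical direct call to the chat endpoint; the body field names
// (message, model, options) match what the handler above reads.
const text = await $fetch('/api/chat', {
  method: 'POST',
  body: {
    message: 'Write a haiku about Nuxt',
    model: 'gpt-3.5-turbo', // omit to fall back to modelMap[MODEL_GPT_TURBO_3_5]
    options: { temperature: 0.7 },
  },
})
// Per chat.d.ts above, the handler resolves to a Promise<string>.
```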
package/dist/runtime/utils/model-map.d.ts
CHANGED

@@ -1,4 +1,8 @@
-export declare const modelMap: {
-    [x: string]: string;
-
-
+export declare const modelMap: {
+    [x: string]: string;
+    "gpt-3.5-turbo-1106": string;
+    "gpt-4-1106-preview": string;
+    "gpt-4-0613": string;
+    "gpt-4-32k-0613": string;
+    default: string;
+};
package/dist/runtime/utils/model-map.mjs
CHANGED

@@ -3,19 +3,27 @@ import {
   MODEL_TEXT_DAVINCI_002,
   MODEL_GPT_TURBO_3_5,
   MODEL_GPT_TURBO_3_5_0301,
+  MODEL_GPT_TURBO_3_5_1106,
   MODEL_GPT_4,
+  MODEL_GPT_4_1106_PREVIEW,
   MODEL_GPT_4_0314,
+  MODEL_GPT_4_0613,
   MODEL_GPT_4_32k,
-  MODEL_GPT_4_32k_0314
+  MODEL_GPT_4_32k_0314,
+  MODEL_GPT_4_32k_0613
 } from "../constants/models.mjs";
 export const modelMap = {
   [MODEL_TEXT_DAVINCI_003]: MODEL_TEXT_DAVINCI_003,
   [MODEL_TEXT_DAVINCI_002]: MODEL_TEXT_DAVINCI_002,
   [MODEL_GPT_TURBO_3_5]: MODEL_GPT_TURBO_3_5,
   [MODEL_GPT_TURBO_3_5_0301]: MODEL_GPT_TURBO_3_5_0301,
+  [MODEL_GPT_TURBO_3_5_1106]: MODEL_GPT_TURBO_3_5_1106,
   [MODEL_GPT_4]: MODEL_GPT_4,
+  [MODEL_GPT_4_1106_PREVIEW]: MODEL_GPT_4_1106_PREVIEW,
   [MODEL_GPT_4_0314]: MODEL_GPT_4_0314,
+  [MODEL_GPT_4_0613]: MODEL_GPT_4_0613,
   [MODEL_GPT_4_32k]: MODEL_GPT_4_32k,
   [MODEL_GPT_4_32k_0314]: MODEL_GPT_4_32k_0314,
-
+  [MODEL_GPT_4_32k_0613]: MODEL_GPT_4_32k_0613,
+  default: MODEL_GPT_TURBO_3_5
 };
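One behavior worth noting: `modelMap` is an identity map keyed by model name, and both server handlers resolve the model as `!model ? modelMap[MODEL_GPT_TURBO_3_5] : modelMap[model]`. The new `default` entry is therefore only reachable by its literal key; an unrecognized model name still resolves to `undefined` rather than falling back to the default. A small sketch of the resulting lookup behavior:

```js
// Illustrative lookups against the map defined above (import path shown
// relative to dist/runtime/utils; normally internal to the module).
import { modelMap } from './model-map.mjs'

console.log(modelMap['gpt-4-0613'])         // 'gpt-4-0613' — identity mapping
console.log(modelMap.default)               // 'gpt-3.5-turbo'
console.log(modelMap['some-unknown-model']) // undefined — the handlers' ternary
                                            // never falls back to modelMap.default
```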
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "nuxt-chatgpt",
-  "version": "0.2.
+  "version": "0.2.2",
   "description": "ChatGPT integration for Nuxt 3",
   "license": "MIT",
   "type": "module",

@@ -53,18 +53,18 @@
     "test:watch": "vitest watch"
   },
   "dependencies": {
-    "@nuxt/kit": "
-    "defu": "
-    "openai": "
+    "@nuxt/kit": "latest",
+    "defu": "latest",
+    "openai": "4.0.0"
   },
   "devDependencies": {
-    "@nuxt/eslint-config": "
+    "@nuxt/eslint-config": "latest",
     "@nuxt/module-builder": "^0.2.1",
-    "@nuxt/schema": "
-    "@nuxt/test-utils": "
-    "changelogen": "
-    "eslint": "
-    "nuxt": "
-    "vitest": "
+    "@nuxt/schema": "latest",
+    "@nuxt/test-utils": "latest",
+    "changelogen": "latest",
+    "eslint": "latest",
+    "nuxt": "latest",
+    "vitest": "latest"
   }
-}
+}