nuxt-chatgpt 0.2.5 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +40 -16
- package/dist/{module.d.ts → module.d.mts} +3 -2
- package/dist/module.json +6 -2
- package/dist/module.mjs +3 -3
- package/dist/runtime/composables/useChatgpt.d.ts +1 -1
- package/dist/runtime/constants/models.d.ts +9 -1
- package/dist/runtime/constants/{models.mjs → models.js} +9 -1
- package/dist/runtime/constants/options.d.ts +2 -14
- package/dist/runtime/constants/options.js +14 -0
- package/dist/runtime/server/api/{chat-completion.mjs → chat-completion.js} +5 -5
- package/dist/runtime/server/api/{chat.mjs → chat.js} +5 -5
- package/dist/runtime/server/api/image-generate.d.ts +1 -1
- package/dist/runtime/server/api/{image-generate.mjs → image-generate.js} +5 -5
- package/dist/runtime/utils/model-map.d.ts +21 -2
- package/dist/runtime/utils/{model-map.mjs → model-map.js} +21 -5
- package/dist/types.d.mts +3 -0
- package/package.json +72 -72
- package/dist/module.cjs +0 -5
- package/dist/runtime/constants/options.mjs +0 -14
- package/dist/types.d.ts +0 -10
- /package/dist/runtime/composables/{useChatgpt.mjs → useChatgpt.js} +0 -0
package/README.md
CHANGED
@@ -2,7 +2,8 @@
 <br />
 <div>
 <div>
-<h1>
+<h1>Hausly + Image Generator<a href="https://hausly.io" target="_blank">🔥(IMAGE DEMO)🔥</a></h2></h1>
+<h2>Nuxt Chatgpt + Image Generator<a href="https://nuxtchatgpt.com" target="_blank">🔥(CHATGPT DEMO)🔥</a></h2>
 
 </div>
 <div style="display:flex; width:100%; justify-content:center">
@@ -33,6 +34,12 @@ This user-friendly module boasts of an easy integration process that enables sea
 - 🔥 Ensures security by routing requests through a [Nitro Server](https://nuxt.com/docs/guide/concepts/server-engine), preventing the <b>API Key</b> from being exposed.
 - 🧱 It is lightweight and performs well.
 
+
+## Recommended Node Version
+
+### min `v18.20.5 or higher`
+### recommended `v20.19.0`
+
 ## Getting Started
 
 1. Add nuxt-chatgpt dependency to your project
@@ -72,7 +79,7 @@ The `chat`, and `chatCompletion` methods requires three parameters:
 |--|--|--|--|
 |**message**|`String`|available only for `chat()`|A string representing the text message that you want to send to the GPT model for processing.
 |**messages**|`Array`|available only for `chatCompletion()`|An array of objects that contains `role` and `content`
-|**model**|`String`|`gpt-
+|**model**|`String`|`gpt-5-mini` for `chat()` and `gpt-5-mini` for `chatCompletion()`|Represent certain model for different types of natural language processing tasks.
 |**options**|`Object`|`{ temperature: 0.5, max_tokens: 2048, top_p: 1 frequency_penalty: 0, presence_penalty: 0 }`|An optional object that specifies any additional options you want to pass to the API request, such as, the number of responses to generate, and the maximum length of each response.
 
 The `generateImage` method requires one parameters:
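For reference, a minimal sketch (not part of this diff) of how the parameters in the table above map onto a call, using the composable signatures documented later in this README; the prompt text and option values are illustrative only:

```js
// Hypothetical illustration of the `messages` parameter: an array of
// { role, content } objects passed to chatCompletion(), with an explicit model.
const { chatCompletion } = useChatgpt()

const messages = [
  { role: 'system', content: 'You are a concise assistant.' },
  { role: 'user', content: 'Explain Nitro in one sentence.' },
]

const response = await chatCompletion(messages, 'gpt-5-mini', { temperature: 0.5 })
```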
@@ -80,7 +87,7 @@ The `generateImage` method requires one parameters:
 | Name | Type | Default | Description |
 |--|--|--|--|
 |**message**|`String`| A text description of the desired image(s). The maximum length is 1000 characters.
-|**model**|`String`|`
+|**model**|`String`|`gpt-image-1-mini`| The model to use for image generation.
 |**options**|`Object`|`{ n: 1, quality: 'standard', response_format: 'url', size: '1024x1024', style: 'natural' }`|An optional object that specifies any additional options you want to pass to the API request, such as, the number of images to generate, quality, size and style of the generated images.
 
 Available models:
@@ -100,8 +107,16 @@ Available models:
 - gpt-4-32k
 - gpt-4-32k-0314
 - gpt-4-32k-0613
--
+- gpt-5-nano
+- gpt-5-mini
+- gpt-5-pro
+- gpt-5.1
+- gpt-5.2-pro
+- gpt-5.2
 - dall-e-3
+- gpt-image-1
+- gpt-image-1-mini
+- gpt-image-1.5
 
 ### Simple `chat` usage
 In the following example, the model is unspecified, and the gpt-4o-mini model will be used by default.
@@ -117,7 +132,7 @@ async function sendMessage() {
     const response = await chat(inputData.value)
     data.value = response
   } catch(error) {
-    alert(`
+    alert(`Verify your organization if you want to use GPT-5 models: ${error}`)
   }
 }
 
@@ -146,10 +161,10 @@ const inputData = ref('')
 
 async function sendMessage() {
   try {
-    const response = await chat(inputData.value, 'gpt-
+    const response = await chat(inputData.value, 'gpt-5-mini')
     data.value = response
   } catch(error) {
-    alert(`
+    alert(`Verify your organization if you want to use GPT-5 models: ${error}`)
   }
 }
 
@@ -195,7 +210,7 @@ async function sendMessage() {
 
     chatTree.value.push(responseMessage)
   } catch(error) {
-    alert(`
+    alert(`Verify your organization if you want to use GPT-5 models: ${error}`)
   }
 }
 
@@ -239,7 +254,7 @@ async function sendMessage() {
 
     chatTree.value.push(message)
 
-    const response = await chatCompletion(chatTree.value, 'gpt-
+    const response = await chatCompletion(chatTree.value, 'gpt-5-mini')
 
     const responseMessage = {
       role: response[0].message.role,
@@ -248,7 +263,7 @@ async function sendMessage() {
 
     chatTree.value.push(responseMessage)
   } catch(error) {
-    alert(`
+    alert(`Verify your organization if you want to use GPT-5 models: ${error}`)
   }
 }
 
@@ -276,7 +291,7 @@ async function sendMessage() {
 ```
 
 ### Simple `generateImage` usage
-In the following example, the model is unspecified, and the
+In the following example, the model is unspecified, and the `gpt-image-1-mini` model will be used by default.
 
 ```js
 const { generateImage } = useChatgpt()
@@ -285,14 +300,23 @@ const images = ref([])
 const inputData = ref('')
 const loading = ref(false)
 
+function b64ToBlobUrl(b64) {
+  const bytes = Uint8Array.from(atob(b64), (c) => c.charCodeAt(0));
+  const blob = new Blob([bytes], { type: "image/png" });
+  return URL.createObjectURL(blob);
+}
+
 async function sendPrompt() {
-  loading.value = true
+  loading.value = true;
   try {
-
+    const result = await generateImage(inputData.value);
+    images.value = result.map((img) => ({
+      url: b64ToBlobUrl(img.b64_json),
+    }));
   } catch (error) {
-    alert(`Error: ${error}`)
+    alert(`Error: ${error}`);
   }
-  loading.value = false
+  loading.value = false;
 }
 
 ```
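The new example turns each base64 payload into an object URL; object URLs hold on to memory until released, so a small companion sketch (an assumption, not part of the package) frees them once the images are no longer displayed:

```js
// Hypothetical cleanup for the blob URLs produced by b64ToBlobUrl() above:
// revoke each object URL before discarding the images array.
function releaseImages(images) {
  for (const img of images) {
    URL.revokeObjectURL(img.url)
  }
  images.length = 0
}
```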
@@ -327,7 +351,7 @@ const loading = ref(false)
 async function sendPrompt() {
   loading.value = true
   try {
-    images.value = await generateImage(inputData.value, 'dall-e-
+    images.value = await generateImage(inputData.value, 'dall-e-3', {
       n: 1,
       quality: 'standard',
       response_format: 'url',
package/dist/{module.d.ts → module.d.mts}
CHANGED

@@ -14,6 +14,7 @@ interface ModuleOptions {
    */
   isEnabled?: boolean;
 }
-declare const _default: _nuxt_schema.NuxtModule<ModuleOptions>;
+declare const _default: _nuxt_schema.NuxtModule<ModuleOptions, ModuleOptions, false>;
 
-export {
+export { _default as default };
+export type { ModuleOptions };
package/dist/module.json
CHANGED
package/dist/module.mjs
CHANGED
@@ -4,9 +4,9 @@ import { defu } from 'defu';
 
 const configKey = "chatgpt";
 const moduleName = "nuxt-chatgpt";
-const nuxtVersion = "
+const nuxtVersion = ">=3.0.0 <5.0.0";
 
-const module = defineNuxtModule({
+const module$1 = defineNuxtModule({
   meta: {
     name: moduleName,
     configKey,
@@ -56,4 +56,4 @@ const module = defineNuxtModule({
   }
 });
 
-export { module as default };
+export { module$1 as default };
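The module registers under the `chatgpt` config key and now declares compatibility with Nuxt `>=3.0.0 <5.0.0`. A minimal sketch of enabling it in a consuming app; `isEnabled` comes from the ModuleOptions shown above, while `apiKey` is assumed from the package README and is not part of this diff:

```js
// nuxt.config — minimal sketch of wiring up the module.
export default defineNuxtConfig({
  modules: ['nuxt-chatgpt'],
  chatgpt: {
    apiKey: process.env.OPENAI_API_KEY, // assumed option, documented in the README
    isEnabled: true,
  },
})
```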
package/dist/runtime/composables/useChatgpt.d.ts
CHANGED

@@ -1,2 +1,2 @@
-import type { IChatgptClient } from "../types";
+import type { IChatgptClient } from "../types/index.js";
 export declare const useChatgpt: () => IChatgptClient;
package/dist/runtime/constants/models.d.ts
CHANGED

@@ -13,5 +13,13 @@ export declare const MODEL_GPT_4_0613 = "gpt-4-0613";
 export declare const MODEL_GPT_4_32k: string;
 export declare const MODEL_GPT_4_32k_0314: string;
 export declare const MODEL_GPT_4_32k_0613 = "gpt-4-32k-0613";
-export declare const
+export declare const MODEL_GPT_5_NANO = "gpt-5-nano";
+export declare const MODEL_GPT_5_MINI = "gpt-5-mini";
+export declare const MODEL_GPT_5_PRO = "gpt-5-pro";
+export declare const MODEL_GPT_5_1 = "gpt-5.1";
+export declare const MODEL_GPT_5_2_PRO = "gpt-5.2-pro";
+export declare const MODEL_GPT_5_2 = "gpt-5.2";
 export declare const MODEL_GPT_DALL_E_3 = "dall-e-3";
+export declare const MODEL_GPT_IMAGE_1 = "gpt-image-1";
+export declare const MODEL_GPT_IMAGE_1_MINI = "gpt-image-1-mini";
+export declare const MODEL_GPT_IMAGE_1_5 = "gpt-image-1.5";
package/dist/runtime/constants/{models.mjs → models.js}
CHANGED

@@ -13,5 +13,13 @@ export const MODEL_GPT_4_0613 = "gpt-4-0613";
 export const MODEL_GPT_4_32k = "gpt-4-32k";
 export const MODEL_GPT_4_32k_0314 = "gpt-4-32k-0314";
 export const MODEL_GPT_4_32k_0613 = "gpt-4-32k-0613";
-export const
+export const MODEL_GPT_5_NANO = "gpt-5-nano";
+export const MODEL_GPT_5_MINI = "gpt-5-mini";
+export const MODEL_GPT_5_PRO = "gpt-5-pro";
+export const MODEL_GPT_5_1 = "gpt-5.1";
+export const MODEL_GPT_5_2_PRO = "gpt-5.2-pro";
+export const MODEL_GPT_5_2 = "gpt-5.2";
 export const MODEL_GPT_DALL_E_3 = "dall-e-3";
+export const MODEL_GPT_IMAGE_1 = "gpt-image-1";
+export const MODEL_GPT_IMAGE_1_MINI = "gpt-image-1-mini";
+export const MODEL_GPT_IMAGE_1_5 = "gpt-image-1.5";
package/dist/runtime/constants/options.d.ts
CHANGED

@@ -1,14 +1,2 @@
-export declare const defaultOptions: {
-
-    max_tokens: number;
-    top_p: number;
-    frequency_penalty: number;
-    presence_penalty: number;
-};
-export declare const defaultDaleOptions: {
-    n: number;
-    quality: string;
-    response_format: string;
-    size: string;
-    style: string;
-};
+export declare const defaultOptions: {};
+export declare const defaultDaleOptions: {};
package/dist/runtime/constants/options.js
ADDED

@@ -0,0 +1,14 @@
+export const defaultOptions = {
+  // temperature: 0.5,
+  // max_tokens: 2048,
+  // top_p: 1,
+  // frequency_penalty: 0,
+  // presence_penalty: 0
+};
+export const defaultDaleOptions = {
+  // n: 1,
+  // quality: 'standard',
+  // response_format: 'url',
+  // size: '1024x1024',
+  // style: 'natural'
+};
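With `defaultOptions` and `defaultDaleOptions` now empty objects (the old values survive only as comments), the server no longer applies sampling or image defaults on its own. A sketch, assuming the composable signatures from the README, of passing the old defaults explicitly where they are still wanted:

```js
// The previous defaults (temperature, max_tokens, n, size, …) are no longer
// applied server-side, so pass them per call if you relied on them.
const { chat, generateImage } = useChatgpt()

const answer = await chat('Hello!', 'gpt-5-mini', { temperature: 0.5, max_tokens: 2048 })
const pictures = await generateImage('A lighthouse at dusk', 'dall-e-3', { n: 1, size: '1024x1024' })
```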
package/dist/runtime/server/api/{chat-completion.mjs → chat-completion.js}
CHANGED

@@ -1,8 +1,8 @@
 import OpenAI from "openai";
 import { createError, defineEventHandler, readBody } from "h3";
-import { defaultOptions } from "../../constants/options.
-import {
-import { modelMap } from "../../utils/model-map.
+import { defaultOptions } from "../../constants/options.js";
+import { MODEL_GPT_5_MINI } from "../../constants/models.js";
+import { modelMap } from "../../utils/model-map.js";
 import { useRuntimeConfig } from "#imports";
 export default defineEventHandler(async (event) => {
   const { messages, model, options } = await readBody(event);
@@ -17,7 +17,7 @@ export default defineEventHandler(async (event) => {
   });
   const requestOptions = {
     messages,
-    model: !model ? modelMap[
+    model: !model ? modelMap[MODEL_GPT_5_MINI] : modelMap[model],
     ...options || defaultOptions
   };
   try {
@@ -26,7 +26,7 @@ export default defineEventHandler(async (event) => {
   } catch (error) {
     throw createError({
       statusCode: 500,
-      message:
+      message: error
     });
   }
 });
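The handler now falls back to `gpt-5-mini` when no model is supplied and resolves known names through `modelMap`. A simplified, stand-alone sketch of that resolution logic (not the module's exact code):

```js
// Simplified model resolution mirroring the handler above. Unknown names
// resolve to undefined because the lookup has no catch-all here.
const modelMap = { 'gpt-5-mini': 'gpt-5-mini', 'gpt-5.1': 'gpt-5.1' } // excerpt only

function resolveModel(model) {
  return !model ? modelMap['gpt-5-mini'] : modelMap[model]
}

resolveModel()          // 'gpt-5-mini'
resolveModel('gpt-5.1') // 'gpt-5.1'
resolveModel('bogus')   // undefined — the OpenAI request will then be rejected
```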
package/dist/runtime/server/api/{chat.mjs → chat.js}
CHANGED

@@ -1,8 +1,8 @@
 import OpenAI from "openai";
 import { createError, defineEventHandler, readBody } from "h3";
-import { defaultOptions } from "../../constants/options.
-import {
-import { modelMap } from "../../utils/model-map.
+import { defaultOptions } from "../../constants/options.js";
+import { MODEL_GPT_5_MINI } from "../../constants/models.js";
+import { modelMap } from "../../utils/model-map.js";
 import { useRuntimeConfig } from "#imports";
 export default defineEventHandler(async (event) => {
   const { message, model, options } = await readBody(event);
@@ -17,7 +17,7 @@ export default defineEventHandler(async (event) => {
   });
   const requestOptions = {
     messages: [{ role: "user", content: message }],
-    model: !model ? modelMap[
+    model: !model ? modelMap[MODEL_GPT_5_MINI] : modelMap[model],
     ...options || defaultOptions
   };
   try {
@@ -26,7 +26,7 @@ export default defineEventHandler(async (event) => {
   } catch (error) {
     throw createError({
       statusCode: 500,
-      message:
+      message: error
     });
   }
 });
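As the handler shows, `chat` wraps the single string into a one-element `messages` array before calling the same completions API. A sketch of the equivalence, using the composable signatures documented in the README:

```js
// These two calls produce the same request body on the server:
// chat() wraps the string as [{ role: 'user', content: message }].
const { chat, chatCompletion } = useChatgpt()

const viaChat = await chat('What is Nitro?', 'gpt-5-mini')
const viaCompletion = await chatCompletion([{ role: 'user', content: 'What is Nitro?' }], 'gpt-5-mini')
```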
package/dist/runtime/server/api/image-generate.d.ts
CHANGED

@@ -1,3 +1,3 @@
 import OpenAI from 'openai';
-declare const _default: import("h3").EventHandler<import("h3").EventHandlerRequest, Promise<OpenAI.Images.Image[]>>;
+declare const _default: import("h3").EventHandler<import("h3").EventHandlerRequest, Promise<OpenAI.Images.Image[] | undefined>>;
 export default _default;
package/dist/runtime/server/api/{image-generate.mjs → image-generate.js}
CHANGED

@@ -1,8 +1,8 @@
 import OpenAI from "openai";
 import { createError, defineEventHandler, readBody } from "h3";
-import { defaultDaleOptions } from "../../constants/options.
-import {
-import { modelMap } from "../../utils/model-map.
+import { defaultDaleOptions } from "../../constants/options.js";
+import { MODEL_GPT_IMAGE_1_MINI } from "../../constants/models.js";
+import { modelMap } from "../../utils/model-map.js";
 import { useRuntimeConfig } from "#imports";
 export default defineEventHandler(async (event) => {
   const { message, model, options } = await readBody(event);
@@ -17,7 +17,7 @@ export default defineEventHandler(async (event) => {
   });
   const requestOptions = {
     prompt: message,
-    model: !model ? modelMap[
+    model: !model ? modelMap[MODEL_GPT_IMAGE_1_MINI] : modelMap[model],
     ...options || defaultDaleOptions
   };
   try {
@@ -26,7 +26,7 @@ export default defineEventHandler(async (event) => {
   } catch (error) {
     throw createError({
       statusCode: 500,
-      message:
+      message: error
     });
   }
 });
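The declaration change above means the image endpoint may now resolve to `undefined` instead of always returning an array. A hedged sketch of guarding the result on the client, reusing the `b64ToBlobUrl` helper from the README example; the exact response shape depends on the chosen model and options:

```js
// Guard against an undefined result and handle either `url` or `b64_json`
// entries, both of which exist on OpenAI's Image type.
const result = await generateImage(inputData.value, 'gpt-image-1-mini')
const urls = (result ?? []).map((img) => img.url ?? b64ToBlobUrl(img.b64_json))
```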
package/dist/runtime/utils/model-map.d.ts
CHANGED

@@ -1,10 +1,29 @@
+import { MODEL_TEXT_DAVINCI_003, MODEL_TEXT_DAVINCI_002, MODEL_GPT_TURBO_3_5, MODEL_GPT_TURBO_3_5_0301, MODEL_GPT_4, MODEL_GPT_4_O, MODEL_GPT_4_MINI, MODEL_GPT_4_TURBO, MODEL_GPT_4_0314, MODEL_GPT_4_32k, MODEL_GPT_4_32k_0314 } from "../constants/models.js";
 export declare const modelMap: {
-    [
+    [MODEL_TEXT_DAVINCI_003]: string;
+    [MODEL_TEXT_DAVINCI_002]: string;
+    [MODEL_GPT_TURBO_3_5]: string;
+    [MODEL_GPT_TURBO_3_5_0301]: string;
+    [MODEL_GPT_4]: string;
+    [MODEL_GPT_4_O]: string;
+    [MODEL_GPT_4_MINI]: string;
+    [MODEL_GPT_4_TURBO]: string;
+    [MODEL_GPT_4_0314]: string;
+    [MODEL_GPT_4_32k]: string;
+    [MODEL_GPT_4_32k_0314]: string;
     "gpt-3.5-turbo-1106": string;
     "gpt-4-1106-preview": string;
     "gpt-4-0613": string;
     "gpt-4-32k-0613": string;
-    "dall-e-2": string;
     "dall-e-3": string;
+    "gpt-image-1": string;
+    "gpt-image-1-mini": string;
+    "gpt-image-1.5": string;
+    "gpt-5-nano": string;
+    "gpt-5-mini": string;
+    "gpt-5-pro": string;
+    "gpt-5.1": string;
+    "gpt-5.2-pro": string;
+    "gpt-5.2": string;
     default: string;
 };
package/dist/runtime/utils/{model-map.mjs → model-map.js}
CHANGED

@@ -14,9 +14,17 @@ import {
   MODEL_GPT_4_32k,
   MODEL_GPT_4_32k_0314,
   MODEL_GPT_4_32k_0613,
-
-
-
+  MODEL_GPT_5_NANO,
+  MODEL_GPT_5_MINI,
+  MODEL_GPT_5_PRO,
+  MODEL_GPT_5_1,
+  MODEL_GPT_5_2_PRO,
+  MODEL_GPT_5_2,
+  MODEL_GPT_DALL_E_3,
+  MODEL_GPT_IMAGE_1,
+  MODEL_GPT_IMAGE_1_MINI,
+  MODEL_GPT_IMAGE_1_5
+} from "../constants/models.js";
 export const modelMap = {
   [MODEL_TEXT_DAVINCI_003]: MODEL_TEXT_DAVINCI_003,
   [MODEL_TEXT_DAVINCI_002]: MODEL_TEXT_DAVINCI_002,
@@ -33,7 +41,15 @@ export const modelMap = {
   [MODEL_GPT_4_32k]: MODEL_GPT_4_32k,
   [MODEL_GPT_4_32k_0314]: MODEL_GPT_4_32k_0314,
   [MODEL_GPT_4_32k_0613]: MODEL_GPT_4_32k_0613,
-  [MODEL_GPT_DALL_E_2]: MODEL_GPT_DALL_E_2,
   [MODEL_GPT_DALL_E_3]: MODEL_GPT_DALL_E_3,
-
+  [MODEL_GPT_IMAGE_1]: MODEL_GPT_IMAGE_1,
+  [MODEL_GPT_IMAGE_1_MINI]: MODEL_GPT_IMAGE_1_MINI,
+  [MODEL_GPT_IMAGE_1_5]: MODEL_GPT_IMAGE_1_5,
+  [MODEL_GPT_5_NANO]: MODEL_GPT_5_NANO,
+  [MODEL_GPT_5_MINI]: MODEL_GPT_5_MINI,
+  [MODEL_GPT_5_PRO]: MODEL_GPT_5_PRO,
+  [MODEL_GPT_5_1]: MODEL_GPT_5_1,
+  [MODEL_GPT_5_2_PRO]: MODEL_GPT_5_2_PRO,
+  [MODEL_GPT_5_2]: MODEL_GPT_5_2,
+  default: MODEL_GPT_5_MINI
 };
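The map now also carries a `default` entry pointing at `gpt-5-mini`. The handlers in this release index the map with an explicit constant rather than reading `default`, but a caller-side sketch (an assumption, not package behaviour) shows one way the entry could be used:

```js
// Hypothetical lookup that falls back to modelMap.default for unknown names,
// instead of letting the lookup produce undefined. Import path is illustrative.
import { modelMap } from './model-map.js'

function resolveOrDefault(name) {
  return modelMap[name] ?? modelMap.default
}
```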
package/dist/types.d.mts
ADDED
package/package.json
CHANGED
@@ -1,72 +1,72 @@
-{
-  "name": "nuxt-chatgpt",
-  "version": "0.
-  "description": "ChatGPT integration for Nuxt 3",
-  "license": "MIT",
-  "type": "module",
-  "homepage": "https://vuemadness.com/nuxt-chatgpt",
-  "bugs": {
-    "url": "https://github.com/schnapsterdog/nuxt-chatgpt/issues"
-  },
-  "repository": {
-    "type": "git",
-    "url": "git+https://github.com/schnapsterdog/nuxt-chatgpt"
-  },
-  "contributors": [
-    {
-      "name": "Oliver Trajceski (@schnapsterdog)"
-    }
-  ],
-  "author": {
-    "name": "Oliver Trajceski",
-    "email": "oliver@akrinum.com"
-  },
-  "keywords": [
-    "vue3",
-    "nuxt3",
-    "nuxt",
-    "nuxt.js",
-    "nuxt-chatgpt",
-    "image",
-    "image-generator"
-  ],
-  "exports": {
-    ".": {
-      "types": "./dist/types.d.
-      "import": "./dist/module.mjs",
-      "
-    }
-  },
-  "main": "./dist/module.
-  "types": "./dist/types.d.
-  "files": [
-    "dist"
-  ],
-  "scripts": {
-    "prepack": "nuxt-module-build",
-    "dev": "nuxi dev playground",
-    "dev:build": "nuxi build playground",
-    "dev:generate": "nuxi generate playground",
-    "dev:prepare": "nuxt-module-build --stub && nuxi prepare playground",
-    "dev:preview": "nuxi preview playground",
-    "release": "npm run lint && npm run test && npm run prepack && changelogen --release --minor && npm publish && git push --follow-tags",
-    "lint": "eslint .",
-    "test": "vitest run",
-    "test:watch": "vitest watch"
-  },
-  "dependencies": {
-    "@nuxt/kit": "latest",
-    "defu": "latest",
-    "openai": "4.
-  },
-  "devDependencies": {
-    "@nuxt/eslint-config": "latest",
-    "@nuxt/module-builder": "^0.
-    "@nuxt/schema": "latest",
-    "@nuxt/test-utils": "
-    "changelogen": "latest",
-    "eslint": "latest",
-    "nuxt": "
-    "vitest": "latest"
-  }
-}
+{
+  "name": "nuxt-chatgpt",
+  "version": "0.3.0",
+  "description": "ChatGPT integration for Nuxt 3",
+  "license": "MIT",
+  "type": "module",
+  "homepage": "https://vuemadness.com/nuxt-chatgpt",
+  "bugs": {
+    "url": "https://github.com/schnapsterdog/nuxt-chatgpt/issues"
+  },
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/schnapsterdog/nuxt-chatgpt"
+  },
+  "contributors": [
+    {
+      "name": "Oliver Trajceski (@schnapsterdog)"
+    }
+  ],
+  "author": {
+    "name": "Oliver Trajceski",
+    "email": "oliver@akrinum.com"
+  },
+  "keywords": [
+    "vue3",
+    "nuxt3",
+    "nuxt",
+    "nuxt.js",
+    "nuxt-chatgpt",
+    "image",
+    "image-generator"
+  ],
+  "exports": {
+    ".": {
+      "types": "./dist/types.d.mts",
+      "import": "./dist/module.mjs",
+      "default": "./dist/module.mjs"
+    }
+  },
+  "main": "./dist/module.mjs",
+  "types": "./dist/types.d.mts",
+  "files": [
+    "dist"
+  ],
+  "scripts": {
+    "prepack": "nuxt-module-build",
+    "dev": "nuxi dev playground",
+    "dev:build": "nuxi build playground",
+    "dev:generate": "nuxi generate playground",
+    "dev:prepare": "nuxt-module-build --stub && nuxi prepare playground",
+    "dev:preview": "nuxi preview playground",
+    "release": "npm run lint && npm run test && npm run prepack && changelogen --release --minor && npm publish && git push --follow-tags",
+    "lint": "eslint .",
+    "test": "vitest run",
+    "test:watch": "vitest watch"
+  },
+  "dependencies": {
+    "@nuxt/kit": "latest",
+    "defu": "latest",
+    "openai": "^4.96.2"
+  },
+  "devDependencies": {
+    "@nuxt/eslint-config": "latest",
+    "@nuxt/module-builder": "^1.0.1",
+    "@nuxt/schema": "latest",
+    "@nuxt/test-utils": "^3.18.0",
+    "changelogen": "latest",
+    "eslint": "latest",
+    "nuxt": "^3.16.2",
+    "vitest": "latest"
+  }
+}
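The `main` and `types` fields now point at the `.mjs` build and its `.d.mts` declarations, and the exports map gains a `default` condition resolving to the same file. A sketch of consuming the package directly through those conditions; most projects will simply list the module name in `modules` instead:

```js
// ESM import resolved via the "import"/"default" conditions above;
// both point at ./dist/module.mjs.
import chatgptModule from 'nuxt-chatgpt'

export default defineNuxtConfig({
  modules: [chatgptModule],
})
```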
package/dist/module.cjs
DELETED
package/dist/runtime/constants/options.mjs
DELETED

@@ -1,14 +0,0 @@
-export const defaultOptions = {
-  temperature: 0.5,
-  max_tokens: 2048,
-  top_p: 1,
-  frequency_penalty: 0,
-  presence_penalty: 0
-};
-export const defaultDaleOptions = {
-  n: 1,
-  quality: "standard",
-  response_format: "url",
-  size: "1024x1024",
-  style: "natural"
-};
package/dist/types.d.ts
DELETED
File without changes