nuxt-chatgpt 0.2.4 → 0.2.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +98 -4
- package/dist/module.json +1 -1
- package/dist/module.mjs +7 -0
- package/dist/runtime/composables/useChatgpt.mjs +18 -1
- package/dist/runtime/constants/models.d.ts +1 -0
- package/dist/runtime/constants/models.mjs +1 -0
- package/dist/runtime/constants/options.d.ts +7 -0
- package/dist/runtime/constants/options.mjs +7 -0
- package/dist/runtime/server/api/chat-completion.mjs +1 -1
- package/dist/runtime/server/api/chat.mjs +1 -1
- package/dist/runtime/server/api/image-generate.d.ts +3 -0
- package/dist/runtime/server/api/image-generate.mjs +32 -0
- package/dist/runtime/types/index.d.ts +1 -0
- package/dist/runtime/utils/model-map.d.ts +1 -0
- package/dist/runtime/utils/model-map.mjs +4 -2
- package/package.json +6 -4
package/README.md
CHANGED
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
<br />
|
|
3
3
|
<div>
|
|
4
4
|
<div>
|
|
5
|
-
<h1>Nuxt Chatgpt <a href="https://nuxtchatgpt.com" target="_blank">đĨ(VIEW DEMO)đĨ</a></h3>
|
|
5
|
+
<h1>Nuxt Chatgpt + Image Generator<a href="https://nuxtchatgpt.com" target="_blank">đĨ(VIEW DEMO)đĨ</a></h3>
|
|
6
6
|
|
|
7
7
|
</div>
|
|
8
8
|
<div style="display:flex; width:100%; justify-content:center">
|
|
@@ -29,7 +29,7 @@ This user-friendly module boasts of an easy integration process that enables sea
|
|
|
29
29
|
|
|
30
30
|
- đĒ Easy implementation into any [Nuxt 3](https://nuxt.com) project.
|
|
31
31
|
- đ Type-safe integration of Chatgpt into your [Nuxt 3](https://nuxt.com) project.
|
|
32
|
-
- đšī¸ Provides a `useChatgpt()` composable that grants easy access to the `chat`, and `chatCompletion` methods.
|
|
32
|
+
- đšī¸ Provides a `useChatgpt()` composable that grants easy access to the `chat`, `chatCompletion`, and `generateImage` methods.
|
|
33
33
|
- đĨ Ensures security by routing requests through a [Nitro Server](https://nuxt.com/docs/guide/concepts/server-engine), preventing the <b>API Key</b> from being exposed.
|
|
34
34
|
- đ§ą It is lightweight and performs well.
|
|
35
35
|
|
|
@@ -64,15 +64,24 @@ That's it! You can now use Nuxt Chatgpt in your Nuxt app đĨ
|
|
|
64
64
|
|
|
65
65
|
## Usage & Examples
|
|
66
66
|
|
|
67
|
-
To access the `chat`, and `
|
|
67
|
+
To access the `chat`, `chatCompletion`, and `generateImage` methods in the nuxt-chatgpt module, you can use the `useChatgpt()` composable, which provides easy access to them.
|
|
68
68
|
|
|
69
|
+
The `chat` and `chatCompletion` methods require three parameters:
|
|
69
70
|
|
|
70
71
|
| Name | Type | Default | Description |
|
|
71
72
|
|--|--|--|--|
|
|
72
73
|
|**message**|`String`|available only for `chat()`|A string representing the text message that you want to send to the GPT model for processing.
|
|
73
74
|
|**messages**|`Array`|available only for `chatCompletion()`|An array of objects that contains `role` and `content`
|
|
74
75
|
|**model**|`String`|`gpt-4o-mini` for `chat()` and `gpt-4o-mini` for `chatCompletion()`|Represent certain model for different types of natural language processing tasks.
|
|
75
|
-
|**options**|`Object`|`{ temperature: 0.5, max_tokens: 2048, top_p: 1 frequency_penalty: 0, presence_penalty: 0 }`|An optional object that specifies any additional options you want to pass to the API request, such as the number of responses to generate, and the maximum length of each response.
|
|
76
|
+
|**options**|`Object`|`{ temperature: 0.5, max_tokens: 2048, top_p: 1 frequency_penalty: 0, presence_penalty: 0 }`|An optional object that specifies any additional options you want to pass to the API request, such as, the number of responses to generate, and the maximum length of each response.
|
|
77
|
+
|
|
78
|
+
The `generateImage` method requires one parameter:
|
|
79
|
+
|
|
80
|
+
| Name | Type | Default | Description |
|
|
81
|
+
|--|--|--|--|
|
|
82
|
+
|**message**|`String`| A text description of the desired image(s). The maximum length is 1000 characters.
|
|
83
|
+
|**model**|`String`|`dall-e-3`| The model to use for image generation. Only dall-e-3 is supported at this time.
|
|
84
|
+
|**options**|`Object`|`{ n: 1, quality: 'standard', response_format: 'url', size: '1024x1024', style: 'natural' }`|An optional object that specifies any additional options you want to pass to the API request, such as, the number of images to generate, quality, size and style of the generated images.
|
|
76
85
|
|
|
77
86
|
Available models:
|
|
78
87
|
|
|
@@ -91,6 +100,7 @@ Available models:
|
|
|
91
100
|
- gpt-4-32k
|
|
92
101
|
- gpt-4-32k-0314
|
|
93
102
|
- gpt-4-32k-0613
|
|
103
|
+
- dall-e-3
|
|
94
104
|
|
|
95
105
|
### Simple `chat` usage
|
|
96
106
|
In the following example, the model is unspecified, and the gpt-4o-mini model will be used by default.
|
|
@@ -264,6 +274,90 @@ async function sendMessage() {
|
|
|
264
274
|
</template>
|
|
265
275
|
```
|
|
266
276
|
|
|
277
|
+
### Simple `generateImage` usage
|
|
278
|
+
In the following example, the model is unspecified, and the dall-e-3 model will be used by default.
|
|
279
|
+
|
|
280
|
+
```js
|
|
281
|
+
const { generateImage } = useChatgpt()
|
|
282
|
+
|
|
283
|
+
const images = ref([])
|
|
284
|
+
const inputData = ref('')
|
|
285
|
+
const loading = ref(false)
|
|
286
|
+
|
|
287
|
+
async function sendPrompt() {
|
|
288
|
+
loading.value = true
|
|
289
|
+
try {
|
|
290
|
+
images.value = await generateImage(inputData.value)
|
|
291
|
+
} catch (error) {
|
|
292
|
+
alert(`Error: ${error}`)
|
|
293
|
+
}
|
|
294
|
+
loading.value = false
|
|
295
|
+
}
|
|
296
|
+
|
|
297
|
+
```
|
|
298
|
+
|
|
299
|
+
```html
|
|
300
|
+
<template>
|
|
301
|
+
<div>
|
|
302
|
+
<div v-if="!loading && !images.length">
|
|
303
|
+
<input v-model="inputData">
|
|
304
|
+
<button
|
|
305
|
+
@click="sendPrompt"
|
|
306
|
+
v-text="'Send Prompt'"
|
|
307
|
+
/>
|
|
308
|
+
</div>
|
|
309
|
+
<div v-else-if="loading">Generating, please wait ...</div>
|
|
310
|
+
<div v-if="images && !loading" >
|
|
311
|
+
<img v-for="image in images" :key="image.url" :src="image.url" alt="generated-image"/>
|
|
312
|
+
</div>
|
|
313
|
+
</div>
|
|
314
|
+
</template>
|
|
315
|
+
```
|
|
316
|
+
|
|
317
|
+
### Usage of `generateImage` with different model, and options
|
|
318
|
+
|
|
319
|
+
```js
|
|
320
|
+
const { generateImage } = useChatgpt()
|
|
321
|
+
|
|
322
|
+
const images = ref([])
|
|
323
|
+
const inputData = ref('')
|
|
324
|
+
const loading = ref(false)
|
|
325
|
+
|
|
326
|
+
async function sendPrompt() {
|
|
327
|
+
loading.value = true
|
|
328
|
+
try {
|
|
329
|
+
images.value = await generateImage(inputData.value, 'dall-e-3', {
|
|
330
|
+
n: 1,
|
|
331
|
+
quality: 'standard',
|
|
332
|
+
response_format: 'url',
|
|
333
|
+
size: '1024x1024',
|
|
334
|
+
style: 'natural'
|
|
335
|
+
})
|
|
336
|
+
} catch (error) {
|
|
337
|
+
alert(`Error: ${error}`)
|
|
338
|
+
}
|
|
339
|
+
loading.value = false
|
|
340
|
+
}
|
|
341
|
+
```
|
|
342
|
+
|
|
343
|
+
```html
|
|
344
|
+
<template>
|
|
345
|
+
<div>
|
|
346
|
+
<div v-if="!loading && !images.length">
|
|
347
|
+
<input v-model="inputData">
|
|
348
|
+
<button
|
|
349
|
+
@click="sendPrompt"
|
|
350
|
+
v-text="'Send Prompt'"
|
|
351
|
+
/>
|
|
352
|
+
</div>
|
|
353
|
+
<div v-else-if="loading">Generating, please wait ...</div>
|
|
354
|
+
<div v-if="images && !loading" >
|
|
355
|
+
<img v-for="image in images" :key="image.url" :src="image.url" alt="generated-image"/>
|
|
356
|
+
</div>
|
|
357
|
+
</div>
|
|
358
|
+
</template>
|
|
359
|
+
```
|
|
360
|
+
|
|
267
361
|
## chat vs chatCompletion
|
|
268
362
|
|
|
269
363
|
The `chat` method allows the user to send a prompt to the OpenAI API and receive a response. You can use this endpoint to build conversational interfaces that can interact with users in a natural way. For example, you could use the chat method to build a chatbot that can answer customer service questions or provide information about a product or service.
|
package/dist/module.json
CHANGED
package/dist/module.mjs
CHANGED
|
@@ -45,6 +45,13 @@ const module = defineNuxtModule({
|
|
|
45
45
|
handler: resolve(runtimeDir, "server/api/chat-completion")
|
|
46
46
|
}
|
|
47
47
|
);
|
|
48
|
+
addServerHandler(
|
|
49
|
+
{
|
|
50
|
+
route: "/api/image-generate",
|
|
51
|
+
method: "post",
|
|
52
|
+
handler: resolve(runtimeDir, "server/api/image-generate")
|
|
53
|
+
}
|
|
54
|
+
);
|
|
48
55
|
nuxt.options.build.transpile.push(runtimeDir);
|
|
49
56
|
}
|
|
50
57
|
});
|
|
@@ -34,5 +34,22 @@ export const useChatgpt = () => {
|
|
|
34
34
|
});
|
|
35
35
|
}
|
|
36
36
|
};
|
|
37
|
-
|
|
37
|
+
const generateImage = async (message, model, options) => {
|
|
38
|
+
try {
|
|
39
|
+
return await $fetch("/api/image-generate", {
|
|
40
|
+
method: "POST",
|
|
41
|
+
body: {
|
|
42
|
+
message,
|
|
43
|
+
model,
|
|
44
|
+
options
|
|
45
|
+
}
|
|
46
|
+
});
|
|
47
|
+
} catch (error) {
|
|
48
|
+
throw createError({
|
|
49
|
+
statusCode: 500,
|
|
50
|
+
message: "Failed to forward request to server"
|
|
51
|
+
});
|
|
52
|
+
}
|
|
53
|
+
};
|
|
54
|
+
return { chat, chatCompletion, generateImage };
|
|
38
55
|
};
|
|
@@ -13,3 +13,4 @@ export declare const MODEL_GPT_4_0613 = "gpt-4-0613";
|
|
|
13
13
|
export declare const MODEL_GPT_4_32k: string;
|
|
14
14
|
export declare const MODEL_GPT_4_32k_0314: string;
|
|
15
15
|
export declare const MODEL_GPT_4_32k_0613 = "gpt-4-32k-0613";
|
|
16
|
+
export declare const MODEL_GPT_DALL_E_3 = "dall-e-3";
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
import OpenAI from "openai";
|
|
2
|
+
import { createError, defineEventHandler, readBody } from "h3";
|
|
3
|
+
import { defaultDaleOptions } from "../../constants/options.mjs";
|
|
4
|
+
import { MODEL_GPT_DALL_E_3 } from "../../constants/models.mjs";
|
|
5
|
+
import { modelMap } from "../../utils/model-map.mjs";
|
|
6
|
+
import { useRuntimeConfig } from "#imports";
|
|
7
|
+
export default defineEventHandler(async (event) => {
|
|
8
|
+
const { message, model, options } = await readBody(event);
|
|
9
|
+
if (!useRuntimeConfig().chatgpt.apiKey) {
|
|
10
|
+
throw createError({
|
|
11
|
+
statusCode: 403,
|
|
12
|
+
message: "Missing OpenAI API Key"
|
|
13
|
+
});
|
|
14
|
+
}
|
|
15
|
+
const openai = new OpenAI({
|
|
16
|
+
apiKey: useRuntimeConfig().chatgpt.apiKey
|
|
17
|
+
});
|
|
18
|
+
const requestOptions = {
|
|
19
|
+
prompt: message,
|
|
20
|
+
model: !model ? modelMap[MODEL_GPT_DALL_E_3] : modelMap[model],
|
|
21
|
+
...options || defaultDaleOptions
|
|
22
|
+
};
|
|
23
|
+
try {
|
|
24
|
+
const response = await openai.images.generate(requestOptions);
|
|
25
|
+
return response.data;
|
|
26
|
+
} catch (error) {
|
|
27
|
+
throw createError({
|
|
28
|
+
statusCode: 500,
|
|
29
|
+
message: error
|
|
30
|
+
});
|
|
31
|
+
}
|
|
32
|
+
});
|
|
@@ -13,7 +13,8 @@ import {
|
|
|
13
13
|
MODEL_GPT_4_0613,
|
|
14
14
|
MODEL_GPT_4_32k,
|
|
15
15
|
MODEL_GPT_4_32k_0314,
|
|
16
|
-
MODEL_GPT_4_32k_0613
|
|
16
|
+
MODEL_GPT_4_32k_0613,
|
|
17
|
+
MODEL_GPT_DALL_E_3
|
|
17
18
|
} from "../constants/models.mjs";
|
|
18
19
|
export const modelMap = {
|
|
19
20
|
[MODEL_TEXT_DAVINCI_003]: MODEL_TEXT_DAVINCI_003,
|
|
@@ -31,5 +32,6 @@ export const modelMap = {
|
|
|
31
32
|
[MODEL_GPT_4_32k]: MODEL_GPT_4_32k,
|
|
32
33
|
[MODEL_GPT_4_32k_0314]: MODEL_GPT_4_32k_0314,
|
|
33
34
|
[MODEL_GPT_4_32k_0613]: MODEL_GPT_4_32k_0613,
|
|
34
|
-
|
|
35
|
+
[MODEL_GPT_DALL_E_3]: MODEL_GPT_DALL_E_3,
|
|
36
|
+
default: MODEL_GPT_4_MINI
|
|
35
37
|
};
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "nuxt-chatgpt",
|
|
3
|
-
"version": "0.2.
|
|
3
|
+
"version": "0.2.7",
|
|
4
4
|
"description": "ChatGPT integration for Nuxt 3",
|
|
5
5
|
"license": "MIT",
|
|
6
6
|
"type": "module",
|
|
@@ -26,7 +26,9 @@
|
|
|
26
26
|
"nuxt3",
|
|
27
27
|
"nuxt",
|
|
28
28
|
"nuxt.js",
|
|
29
|
-
"nuxt-chatgpt"
|
|
29
|
+
"nuxt-chatgpt",
|
|
30
|
+
"image",
|
|
31
|
+
"image-generator"
|
|
30
32
|
],
|
|
31
33
|
"exports": {
|
|
32
34
|
".": {
|
|
@@ -47,7 +49,7 @@
|
|
|
47
49
|
"dev:generate": "nuxi generate playground",
|
|
48
50
|
"dev:prepare": "nuxt-module-build --stub && nuxi prepare playground",
|
|
49
51
|
"dev:preview": "nuxi preview playground",
|
|
50
|
-
"release": "npm run lint && npm run test && npm run prepack && changelogen --release && npm publish && git push --follow-tags",
|
|
52
|
+
"release": "npm run lint && npm run test && npm run prepack && changelogen --release --minor && npm publish && git push --follow-tags",
|
|
51
53
|
"lint": "eslint .",
|
|
52
54
|
"test": "vitest run",
|
|
53
55
|
"test:watch": "vitest watch"
|
|
@@ -55,7 +57,7 @@
|
|
|
55
57
|
"dependencies": {
|
|
56
58
|
"@nuxt/kit": "latest",
|
|
57
59
|
"defu": "latest",
|
|
58
|
-
"openai": "4.
|
|
60
|
+
"openai": "^4.96.2"
|
|
59
61
|
},
|
|
60
62
|
"devDependencies": {
|
|
61
63
|
"@nuxt/eslint-config": "latest",
|