nuxt-chatgpt 0.1.10 → 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -14,7 +14,7 @@
14
14
 
15
15
  ## About the module
16
16
 
17
- This user-friendly module boasts of an easy integration process that enables seamless implementation into any [Nuxt 3](https://nuxt.com) project. With type-safe integration, you can integrate [ChatGPT](https://openai.com/) into your [Nuxt 3](https://nuxt.com) project without breaking a <b>sweat</b>. Enjoy easy access to the `chat`, and `chatCompletion` methods through the `useChatgpt()` composable. Additionally, the module guarantees <b><i>security</i></b> as requests are routed through a [Nitro Server](https://nuxt.com/docs/guide/concepts/server-engine), thus preventing the exposure of your <b>API Key</b>.
17
+ This user-friendly module boasts of an easy integration process that enables seamless implementation into any [Nuxt 3](https://nuxt.com) project. With type-safe integration, you can integrate [ChatGPT](https://openai.com/) into your [Nuxt 3](https://nuxt.com) project without breaking a <b>sweat</b>. Enjoy easy access to the `chat`, and `chatCompletion` methods through the `useChatgpt()` composable. Additionally, the module guarantees <b><i>security</i></b> as requests are routed through a [Nitro Server](https://nuxt.com/docs/guide/concepts/server-engine), thus preventing the exposure of your <b>API Key</b>. The module uses the [openai](https://github.com/openai/openai-node) library version 4.0.0 behind the scenes.
18
18
 
19
19
  ## Features
20
20
 
@@ -60,26 +60,27 @@ To access the `chat`, and `chatCompletion` methods in the nuxt-chatgpt module, y
60
60
 
61
61
  | Name | Type | Default | Description |
62
62
  |--|--|--|--|
63
- |**message**|`String`||A string representing the text message that you want to send to the GPT model for processing.
63
+ |**message**|`String`|available only for `chat()`|A string representing the text message that you want to send to the GPT model for processing.
64
+ |**messages**|`Array`|available only for `chatCompletion()`|An array of message objects, each containing a `role` and `content` property.
64
65
  |**model**|`String`|`text-davinci-003` for `chat()` and `gpt-3.5-turbo` for `chatCompletion()`|Represent certain model for different types of natural language processing tasks.
65
66
 |**options**|`Object`|`{ temperature: 0.5, max_tokens: 2048, top_p: 1, frequency_penalty: 0, presence_penalty: 0 }`|An optional object that specifies any additional options you want to pass to the API request, such as the number of responses to generate, and the maximum length of each response.
66
67
 
67
- Available models for `chat`
68
+ Available models:
68
69
 
69
- - text-davinci-003
70
70
  - text-davinci-002
71
-
72
- Available models for `chatCompletion`
73
-
71
+ - text-davinci-003
74
72
  - gpt-3.5-turbo
75
73
  - gpt-3.5-turbo-0301
76
-
77
- You need to join waitlist to use gpt-4 models within `chatCompletion` method
78
-
74
+ - gpt-3.5-turbo-1106
79
75
  - gpt-4
76
+ - gpt-4-1106-preview
80
77
  - gpt-4-0314
78
+ - gpt-4-0613
81
79
  - gpt-4-32k
82
80
  - gpt-4-32k-0314
81
+ - gpt-4-32k-0613
82
+
83
+ You need to join the waitlist to use GPT-4 models within the `chatCompletion` method
83
84
 
84
85
  ### Simple `chat` usage
85
86
  In the following example, the model is unspecified, and the text-davinci-003 model will be used by default.
@@ -88,11 +89,15 @@ In the following example, the model is unspecified, and the text-davinci-003 mod
88
89
  const { chat } = useChatgpt()
89
90
 
90
91
  const data = ref('')
91
- const message = ref('')
92
+ const inputData = ref('')
92
93
 
93
94
  async function sendMessage() {
94
- const response = await chat(message.value)
95
- data.value = response
95
+ try {
96
+ const response = await chat(inputData.value)
97
+ data.value = response
98
+ } catch(error) {
99
+ alert(`Join the waiting list if you want to use GPT-4 models: ${error}`)
100
+ }
96
101
  }
97
102
 
98
103
  ```
@@ -100,7 +105,7 @@ async function sendMessage() {
100
105
  ```html
101
106
  <template>
102
107
  <div>
103
- <input v-model="message">
108
+ <input v-model="inputData">
104
109
  <button
105
110
  @click="sendMessage"
106
111
  v-text="'Send'"
@@ -116,11 +121,15 @@ async function sendMessage() {
116
121
  const { chat } = useChatgpt()
117
122
 
118
123
  const data = ref('')
119
- const message = ref('')
124
+ const inputData = ref('')
120
125
 
121
126
  async function sendMessage() {
122
- const response = await chat(message.value, 'text-davinci-002')
123
- data.value = response
127
+ try {
128
+ const response = await chat(inputData.value, 'gpt-3.5-turbo')
129
+ data.value = response
130
+ } catch(error) {
131
+ alert(`Join the waiting list if you want to use GPT-4 models: ${error}`)
132
+ }
124
133
  }
125
134
 
126
135
  ```
@@ -128,7 +137,7 @@ async function sendMessage() {
128
137
  ```html
129
138
  <template>
130
139
  <div>
131
- <input v-model="message">
140
+ <input v-model="inputData">
132
141
  <button
133
142
  @click="sendMessage"
134
143
  v-text="'Send'"
@@ -144,12 +153,29 @@ In the following example, the model is unspecified, and the gpt-3.5-turbo model
144
153
  ```js
145
154
  const { chatCompletion } = useChatgpt()
146
155
 
147
- const data = ref('')
148
- const message = ref('')
156
+ const chatTree = ref([])
157
+ const inputData = ref('')
149
158
 
150
159
  async function sendMessage() {
151
- const response = await chatCompletion(message.value)
152
- data.value = response
160
+ try {
161
+ const message = {
162
+ role: 'user',
163
+ content: `${inputData.value}`,
164
+ }
165
+
166
+ chatTree.value.push(message)
167
+
168
+ const response = await chatCompletion(chatTree.value)
169
+
170
+ const responseMessage = {
171
+ role: response[0].message.role,
172
+ content: response[0].message.content
173
+ }
174
+
175
+ chatTree.value.push(responseMessage)
176
+ } catch(error) {
177
+ alert(`Join the waiting list if you want to use GPT-4 models: ${error}`)
178
+ }
153
179
  }
154
180
 
155
181
  ```
@@ -157,12 +183,20 @@ async function sendMessage() {
157
183
  ```html
158
184
  <template>
159
185
  <div>
160
- <input v-model="message">
186
+ <input v-model="inputData">
161
187
  <button
162
188
  @click="sendMessage"
163
189
  v-text="'Send'"
164
190
  />
165
- <div>{{ data }}</div>
191
+ <div>
192
+ <div
193
+ v-for="chat in chatTree"
194
+ :key="chat"
195
+ >
196
+ <strong>{{ chat.role }} :</strong>
197
+ <div>{{ chat.content }} </div>
198
+ </div>
199
+ </div>
166
200
  </div>
167
201
  </template>
168
202
  ```
@@ -172,12 +206,29 @@ async function sendMessage() {
172
206
  ```js
173
207
  const { chatCompletion } = useChatgpt()
174
208
 
175
- const data = ref('')
176
- const message = ref('')
209
+ const chatTree = ref([])
210
+ const inputData = ref('')
177
211
 
178
212
  async function sendMessage() {
179
- const response = await chatCompletion(message.value, 'gpt-3.5-turbo-0301')
180
- data.value = response
213
+ try {
214
+ const message = {
215
+ role: 'user',
216
+ content: `${inputData.value}`,
217
+ }
218
+
219
+ chatTree.value.push(message)
220
+
221
+ const response = await chatCompletion(chatTree.value, 'gpt-3.5-turbo-0301')
222
+
223
+ const responseMessage = {
224
+ role: response[0].message.role,
225
+ content: response[0].message.content
226
+ }
227
+
228
+ chatTree.value.push(responseMessage)
229
+ } catch(error) {
230
+ alert(`Join the waiting list if you want to use GPT-4 models: ${error}`)
231
+ }
181
232
  }
182
233
 
183
234
  ```
@@ -185,12 +236,20 @@ async function sendMessage() {
185
236
  ```html
186
237
  <template>
187
238
  <div>
188
- <input v-model="message">
239
+ <input v-model="inputData">
189
240
  <button
190
241
  @click="sendMessage"
191
242
  v-text="'Send'"
192
243
  />
193
- <div>{{ data }}</div>
244
+ <div>
245
+ <div
246
+ v-for="chat in chatTree"
247
+ :key="chat"
248
+ >
249
+ <strong>{{ chat.role }} :</strong>
250
+ <div>{{ chat.content }} </div>
251
+ </div>
252
+ </div>
194
253
  </div>
195
254
  </template>
196
255
  ```
package/dist/module.d.ts CHANGED
@@ -1,19 +1,19 @@
1
1
  import * as _nuxt_schema from '@nuxt/schema';
2
2
 
3
- interface ModuleOptions {
4
- /**
5
- * Set chatGPT apiKey
6
- * @default undefined
7
- * @description Set your chatGPT apiKey
8
- */
9
- apiKey?: string;
10
- /**
11
- * Setting to `false` disables the module.
12
- * @default true
13
- * @description Use this setting to disable the module.
14
- */
15
- isEnabled?: boolean;
16
- }
3
+ interface ModuleOptions {
4
+ /**
5
+ * Set chatGPT apiKey
6
+ * @default undefined
7
+ * @description Set your chatGPT apiKey
8
+ */
9
+ apiKey?: string;
10
+ /**
11
+ * Setting to `false` disables the module.
12
+ * @default true
13
+ * @description Use this setting to disable the module.
14
+ */
15
+ isEnabled?: boolean;
16
+ }
17
17
  declare const _default: _nuxt_schema.NuxtModule<ModuleOptions>;
18
18
 
19
19
  export { ModuleOptions, _default as default };
package/dist/module.json CHANGED
@@ -4,5 +4,5 @@
4
4
  "compatibility": {
5
5
  "nuxt": "^3.0.0"
6
6
  },
7
- "version": "0.1.10"
7
+ "version": "0.2.2"
8
8
  }
@@ -1,2 +1,2 @@
1
- import type { IChatgptClient } from "../types";
2
- export declare const useChatgpt: () => IChatgptClient;
1
+ import type { IChatgptClient } from "../types";
2
+ export declare const useChatgpt: () => IChatgptClient;
@@ -17,12 +17,12 @@ export const useChatgpt = () => {
17
17
  });
18
18
  }
19
19
  };
20
- const chatCompletion = async (message, model, options) => {
20
+ const chatCompletion = async (messages, model, options) => {
21
21
  try {
22
22
  return await $fetch("/api/chat-completion", {
23
23
  method: "POST",
24
24
  body: {
25
- message,
25
+ messages,
26
26
  model,
27
27
  options
28
28
  }
@@ -1,8 +1,12 @@
1
- export declare const MODEL_TEXT_DAVINCI_003: string;
2
- export declare const MODEL_TEXT_DAVINCI_002: string;
3
- export declare const MODEL_GPT_TURBO_3_5: string;
4
- export declare const MODEL_GPT_TURBO_3_5_0301: string;
5
- export declare const MODEL_GPT_4: string;
6
- export declare const MODEL_GPT_4_0314: string;
7
- export declare const MODEL_GPT_4_32k: string;
8
- export declare const MODEL_GPT_4_32k_0314: string;
1
+ export declare const MODEL_TEXT_DAVINCI_002: string;
2
+ export declare const MODEL_TEXT_DAVINCI_003: string;
3
+ export declare const MODEL_GPT_TURBO_3_5: string;
4
+ export declare const MODEL_GPT_TURBO_3_5_0301: string;
5
+ export declare const MODEL_GPT_TURBO_3_5_1106 = "gpt-3.5-turbo-1106";
6
+ export declare const MODEL_GPT_4: string;
7
+ export declare const MODEL_GPT_4_1106_PREVIEW = "gpt-4-1106-preview";
8
+ export declare const MODEL_GPT_4_0314: string;
9
+ export declare const MODEL_GPT_4_0613 = "gpt-4-0613";
10
+ export declare const MODEL_GPT_4_32k: string;
11
+ export declare const MODEL_GPT_4_32k_0314: string;
12
+ export declare const MODEL_GPT_4_32k_0613 = "gpt-4-32k-0613";
@@ -1,8 +1,12 @@
1
- export const MODEL_TEXT_DAVINCI_003 = "text-davinci-003";
2
1
  export const MODEL_TEXT_DAVINCI_002 = "text-davinci-002";
2
+ export const MODEL_TEXT_DAVINCI_003 = "text-davinci-003";
3
3
  export const MODEL_GPT_TURBO_3_5 = "gpt-3.5-turbo";
4
4
  export const MODEL_GPT_TURBO_3_5_0301 = "gpt-3.5-turbo-0301";
5
+ export const MODEL_GPT_TURBO_3_5_1106 = "gpt-3.5-turbo-1106";
5
6
  export const MODEL_GPT_4 = "gpt-4";
7
+ export const MODEL_GPT_4_1106_PREVIEW = "gpt-4-1106-preview";
6
8
  export const MODEL_GPT_4_0314 = "gpt-4-0314";
9
+ export const MODEL_GPT_4_0613 = "gpt-4-0613";
7
10
  export const MODEL_GPT_4_32k = "gpt-4-32k";
8
11
  export const MODEL_GPT_4_32k_0314 = "gpt-4-32k-0314";
12
+ export const MODEL_GPT_4_32k_0613 = "gpt-4-32k-0613";
@@ -1,7 +1,7 @@
1
- export declare const defaultOptions: {
2
- temperature: number;
3
- max_tokens: number;
4
- top_p: number;
5
- frequency_penalty: number;
6
- presence_penalty: number;
7
- };
1
+ export declare const defaultOptions: {
2
+ temperature: number;
3
+ max_tokens: number;
4
+ top_p: number;
5
+ frequency_penalty: number;
6
+ presence_penalty: number;
7
+ };
@@ -1,2 +1,3 @@
1
- declare const _default: import("h3").EventHandler<string>;
2
- export default _default;
1
+ import OpenAI from 'openai';
2
+ declare const _default: import("h3").EventHandler<import("h3").EventHandlerRequest, Promise<OpenAI.Chat.Completions.ChatCompletion.Choice[]>>;
3
+ export default _default;
@@ -1,29 +1,28 @@
1
+ import OpenAI from "openai";
1
2
  import { createError, defineEventHandler, readBody } from "h3";
2
- import { Configuration, OpenAIApi } from "openai";
3
3
  import { defaultOptions } from "../../constants/options.mjs";
4
4
  import { MODEL_GPT_TURBO_3_5 } from "../../constants/models.mjs";
5
5
  import { modelMap } from "../../utils/model-map.mjs";
6
6
  import { useRuntimeConfig } from "#imports";
7
7
  export default defineEventHandler(async (event) => {
8
- const { message, model, options } = await readBody(event);
8
+ const { messages, model, options } = await readBody(event);
9
9
  if (!useRuntimeConfig().chatgpt.apiKey) {
10
10
  throw createError({
11
11
  statusCode: 403,
12
12
  message: "Missing OpenAI API Key"
13
13
  });
14
14
  }
15
- const configuration = new Configuration({
15
+ const openai = new OpenAI({
16
16
  apiKey: useRuntimeConfig().chatgpt.apiKey
17
17
  });
18
- const openai = new OpenAIApi(configuration);
19
18
  const requestOptions = {
20
- messages: [{ role: "user", content: message }],
19
+ messages,
21
20
  model: !model ? modelMap[MODEL_GPT_TURBO_3_5] : modelMap[model],
22
21
  ...options || defaultOptions
23
22
  };
24
23
  try {
25
- const { data } = await openai.createChatCompletion(requestOptions);
26
- return data.choices[0].message?.content;
24
+ const chatCompletion = await openai.chat.completions.create(requestOptions);
25
+ return chatCompletion.choices;
27
26
  } catch (error) {
28
27
  throw createError({
29
28
  statusCode: 500,
@@ -1,2 +1,2 @@
1
- declare const _default: import("h3").EventHandler<string>;
2
- export default _default;
1
+ declare const _default: import("h3").EventHandler<import("h3").EventHandlerRequest, Promise<string>>;
2
+ export default _default;
@@ -1,6 +1,7 @@
1
+ import OpenAI from "openai";
1
2
  import { createError, defineEventHandler, readBody } from "h3";
2
- import { Configuration, OpenAIApi } from "openai";
3
3
  import { defaultOptions } from "../../constants/options.mjs";
4
+ import { MODEL_GPT_TURBO_3_5 } from "../../constants/models.mjs";
4
5
  import { modelMap } from "../../utils/model-map.mjs";
5
6
  import { useRuntimeConfig } from "#imports";
6
7
  export default defineEventHandler(async (event) => {
@@ -11,18 +12,17 @@ export default defineEventHandler(async (event) => {
11
12
  message: "Missing OpenAI API Key"
12
13
  });
13
14
  }
14
- const configuration = new Configuration({
15
+ const openai = new OpenAI({
15
16
  apiKey: useRuntimeConfig().chatgpt.apiKey
16
17
  });
17
- const openai = new OpenAIApi(configuration);
18
18
  const requestOptions = {
19
- prompt: message,
20
- model: !model ? modelMap.default : modelMap[model],
19
+ messages: [{ role: "user", content: message }],
20
+ model: !model ? modelMap[MODEL_GPT_TURBO_3_5] : modelMap[model],
21
21
  ...options || defaultOptions
22
22
  };
23
23
  try {
24
- const { data } = await openai.createCompletion(requestOptions);
25
- return data.choices[0].text?.slice(2);
24
+ const chatCompletion = await openai.chat.completions.create(requestOptions);
25
+ return chatCompletion.choices[0].message?.content;
26
26
  } catch (error) {
27
27
  throw createError({
28
28
  statusCode: 500,
@@ -1,4 +1,8 @@
1
- export declare const modelMap: {
2
- [x: string]: string;
3
- default: string;
4
- };
1
+ export declare const modelMap: {
2
+ [x: string]: string;
3
+ "gpt-3.5-turbo-1106": string;
4
+ "gpt-4-1106-preview": string;
5
+ "gpt-4-0613": string;
6
+ "gpt-4-32k-0613": string;
7
+ default: string;
8
+ };
@@ -3,19 +3,27 @@ import {
3
3
  MODEL_TEXT_DAVINCI_002,
4
4
  MODEL_GPT_TURBO_3_5,
5
5
  MODEL_GPT_TURBO_3_5_0301,
6
+ MODEL_GPT_TURBO_3_5_1106,
6
7
  MODEL_GPT_4,
8
+ MODEL_GPT_4_1106_PREVIEW,
7
9
  MODEL_GPT_4_0314,
10
+ MODEL_GPT_4_0613,
8
11
  MODEL_GPT_4_32k,
9
- MODEL_GPT_4_32k_0314
12
+ MODEL_GPT_4_32k_0314,
13
+ MODEL_GPT_4_32k_0613
10
14
  } from "../constants/models.mjs";
11
15
  export const modelMap = {
12
16
  [MODEL_TEXT_DAVINCI_003]: MODEL_TEXT_DAVINCI_003,
13
17
  [MODEL_TEXT_DAVINCI_002]: MODEL_TEXT_DAVINCI_002,
14
18
  [MODEL_GPT_TURBO_3_5]: MODEL_GPT_TURBO_3_5,
15
19
  [MODEL_GPT_TURBO_3_5_0301]: MODEL_GPT_TURBO_3_5_0301,
20
+ [MODEL_GPT_TURBO_3_5_1106]: MODEL_GPT_TURBO_3_5_1106,
16
21
  [MODEL_GPT_4]: MODEL_GPT_4,
22
+ [MODEL_GPT_4_1106_PREVIEW]: MODEL_GPT_4_1106_PREVIEW,
17
23
  [MODEL_GPT_4_0314]: MODEL_GPT_4_0314,
24
+ [MODEL_GPT_4_0613]: MODEL_GPT_4_0613,
18
25
  [MODEL_GPT_4_32k]: MODEL_GPT_4_32k,
19
26
  [MODEL_GPT_4_32k_0314]: MODEL_GPT_4_32k_0314,
20
- default: MODEL_TEXT_DAVINCI_003
27
+ [MODEL_GPT_4_32k_0613]: MODEL_GPT_4_32k_0613,
28
+ default: MODEL_GPT_TURBO_3_5
21
29
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "nuxt-chatgpt",
3
- "version": "0.1.10",
3
+ "version": "0.2.2",
4
4
  "description": "ChatGPT integration for Nuxt 3",
5
5
  "license": "MIT",
6
6
  "type": "module",
@@ -53,18 +53,18 @@
53
53
  "test:watch": "vitest watch"
54
54
  },
55
55
  "dependencies": {
56
- "@nuxt/kit": "^3.1.1",
57
- "openai": "^3.2.1",
58
- "defu": "^6.1.2"
56
+ "@nuxt/kit": "latest",
57
+ "defu": "latest",
58
+ "openai": "4.0.0"
59
59
  },
60
60
  "devDependencies": {
61
- "@nuxt/eslint-config": "^0.1.1",
61
+ "@nuxt/eslint-config": "latest",
62
62
  "@nuxt/module-builder": "^0.2.1",
63
- "@nuxt/schema": "^3.1.1",
64
- "@nuxt/test-utils": "^3.1.1",
65
- "changelogen": "^0.4.1",
66
- "eslint": "^8.32.0",
67
- "nuxt": "^3.1.1",
68
- "vitest": "^0.28.2"
63
+ "@nuxt/schema": "latest",
64
+ "@nuxt/test-utils": "latest",
65
+ "changelogen": "latest",
66
+ "eslint": "latest",
67
+ "nuxt": "latest",
68
+ "vitest": "latest"
69
69
  }
70
- }
70
+ }