fg-lib-gpt 1.1.2 → 1.1.3
This diff shows the changes between publicly released versions of the package as published to their registry, and is provided for informational purposes only.
- package/main.js +26 -26
- package/package.json +1 -1
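
In short, 1.1.3 adds terminating semicolons throughout main.js and makes a single functional change: the token limit is now sent to the API as max_completion_tokens instead of max_tokens (see the note after the diff).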
package/main.js
CHANGED

@@ -1,10 +1,10 @@
-const axios = require('axios')
+const axios = require('axios');
 
-const OPENAI_API_KEY = process.env.OPENAI_API_KEY
+const OPENAI_API_KEY = process.env.OPENAI_API_KEY;
 
-const DEFAULT_MODEL = process.env.GPT_DEFAULT_MODEL || 'gpt-4o-mini'
-const DEFAULT_TIMEOUT = process.env.GPT_TIMEOUT || 180000
-const DEFAULT_TEMPERATURE = process.env.GPT_TEMPERATURE || 0.7
+const DEFAULT_MODEL = process.env.GPT_DEFAULT_MODEL || 'gpt-4o-mini';
+const DEFAULT_TIMEOUT = process.env.GPT_TIMEOUT || 180000;
+const DEFAULT_TEMPERATURE = process.env.GPT_TEMPERATURE || 0.7;
 
 
 const doGpt = async ({
@@ -17,43 +17,43 @@ const doGpt = async ({
   max_tokens
 }) => {
 
-  const json = response_format.type === 'json' || response_format.type === 'json_schema'
+  const json = response_format.type === 'json' || response_format.type === 'json_schema';
 
   try {
     const response = await Promise.race([
       getGptResponse({ messages, model, temperature, response_format, max_tokens }),
       new Promise((_, reject) =>
         setTimeout(() => reject(new Error('Custom timeout')), timeout))
-    ])
+    ]);
 
     if (typeof validate === 'function')
-      validate(response)
+      validate(response);
 
     // JSON.parse might also fail
     return {
       success: true,
       content: json ? JSON.parse(response) : response
-    }
+    };
   }
   catch (e) {
-    console.error('**** ERROR ***** OpenAI Error (1): ' + e.message)
+    console.error('**** ERROR ***** OpenAI Error (1): ' + e.message);
     try {
-      const response = await getGptResponse({ messages, model, temperature, response_format, max_tokens })
+      const response = await getGptResponse({ messages, model, temperature, response_format, max_tokens });
 
       return {
         success: true,
         content: json ? JSON.parse(response) : response
-      }
+      };
     }
     catch (e2) {
-      console.error('**** ERROR ***** OpenAI Error (2): ' + e2.message)
+      console.error('**** ERROR ***** OpenAI Error (2): ' + e2.message);
       return {
         success: false,
         content: '🚫 An error occurred. Please try again. If the problem persists, please contact @fgalkov.'
-      }
-    }
-  }
-}
+      };
+    }
+  }
+};
 
 
 
@@ -66,7 +66,7 @@ const getGptResponse = async ({ messages, model, response_format, temperature, m
       model,
       response_format,
       temperature,
-      max_tokens
+      max_completion_tokens: max_tokens
     },
     {
       headers: {
@@ -74,9 +74,9 @@ const getGptResponse = async ({ messages, model, response_format, temperature, m
         'Content-Type': 'application/json',
       }
     }
-  )
-  return res.data.choices[0].message.content.trim()
-}
+  );
+  return res.data.choices[0].message.content.trim();
+};
 
 
 
@@ -122,18 +122,18 @@ const generateSchema = (schemaObj, name = 'schema') => {
       return { type: 'boolean' }
 
     else
-      return {}
-  }
+      return {};
+  };
 
   return {
     name,
     strict: true,
     schema: schema(schemaObj)
-  }
-}
+  };
+};
 
 
 module.exports = {
   doGpt: doGpt,
   generateSchema: generateSchema,
-}
+};
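
The max_completion_tokens rename is the substantive change here: OpenAI's Chat Completions API has deprecated max_tokens in favor of max_completion_tokens, and the newer reasoning models reject the old field outright, so 1.1.2 would fail when pointed at those models. Below is a minimal sketch of the request shape after the change; the endpoint URL and Authorization header fall outside the diff hunks, so they are assumptions based on OpenAI's documented API, and ask is a hypothetical stand-in for the library's getGptResponse.

// Sketch of the 1.1.3 request shape. The endpoint and Authorization
// header are assumptions (not visible in the diff hunks); the field
// names follow OpenAI's public Chat Completions API.
const axios = require('axios');

const ask = async (prompt, max_tokens = 256) => {
  const res = await axios.post(
    'https://api.openai.com/v1/chat/completions',
    {
      model: 'gpt-4o-mini',
      messages: [{ role: 'user', content: prompt }],
      temperature: 0.7,
      max_completion_tokens: max_tokens // 1.1.2 sent this as `max_tokens`
    },
    {
      headers: {
        'Authorization': 'Bearer ' + process.env.OPENAI_API_KEY,
        'Content-Type': 'application/json'
      }
    }
  );
  return res.data.choices[0].message.content.trim();
};

Because doGpt still accepts max_tokens from callers and getGptResponse only renames the field on the wire, the library's public interface appears unchanged between 1.1.2 and 1.1.3.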