@vitkuz/vitkuz-chat-gpt-apapter 1.2.1 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +12 -1
- package/dist/index.d.ts +12 -1
- package/dist/index.js +90 -21
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +90 -22
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.d.mts
CHANGED
@@ -8,6 +8,12 @@ interface Logger {
     }) => void;
     [key: string]: any;
 }
+interface PriceInfo {
+    total: number;
+    inputCost: number;
+    outputCost: number;
+    currency: string;
+}
 interface ChatGptDefaults {
     model?: string;
     temperature?: number;
@@ -71,7 +77,11 @@ interface CreateChatCompletionOutput {
         prompt_tokens: number;
         completion_tokens: number;
         total_tokens: number;
+        prompt_tokens_details?: {
+            cached_tokens?: number;
+        };
     };
+    price?: PriceInfo;
 }
 
 declare const createChatGptClient: (config: ChatGptConfig) => OpenAI;
@@ -93,5 +103,6 @@ declare const CHAT_GPT_MODELS: {
     readonly GPT_3_5_TURBO: "gpt-3.5-turbo";
 };
 type ChatGptModel = (typeof CHAT_GPT_MODELS)[keyof typeof CHAT_GPT_MODELS];
+declare const isStructuredOutputSupported: (model: string) => boolean;
 
-export { CHAT_GPT_MODELS, type ChatGptAdapter, type ChatGptConfig, type ChatGptContext, type ChatGptDefaults, type ChatGptModel, type ChatMessage, type ChatMessageRole, type CreateChatCompletionInput, type CreateChatCompletionOutput, type Logger, createAdapter, createChatCompletion, createChatGptClient };
+export { CHAT_GPT_MODELS, type ChatGptAdapter, type ChatGptConfig, type ChatGptContext, type ChatGptDefaults, type ChatGptModel, type ChatMessage, type ChatMessageRole, type CreateChatCompletionInput, type CreateChatCompletionOutput, type Logger, type PriceInfo, createAdapter, createChatCompletion, createChatGptClient, isStructuredOutputSupported };
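
The new declarations introduce PriceInfo, an optional price on CreateChatCompletionOutput, and optional usage.prompt_tokens_details.cached_tokens. A minimal consumer-side sketch of reading those fields, assuming an adapter built with createAdapter, an OPENAI_API_KEY environment variable, and a message shape matching the exported ChatMessage type (none of which are pinned down by this diff):

import { createAdapter, CHAT_GPT_MODELS, type PriceInfo } from '@vitkuz/vitkuz-chat-gpt-apapter';

const adapter = createAdapter({ apiKey: process.env.OPENAI_API_KEY! });

const main = async () => {
  const result = await adapter.createChatCompletion({
    model: CHAT_GPT_MODELS.GPT_4o_MINI,
    messages: [{ role: 'user', content: 'Say hi' }],
  });

  // price is optional: it is only attached when the model has a pricing entry.
  const price: PriceInfo | undefined = result.price;
  if (price) {
    console.log(`cost: ${price.total} ${price.currency}`, {
      input: price.inputCost,
      output: price.outputCost,
    });
  }

  // Cached prompt tokens, when the API reports them.
  const cached = result.usage?.prompt_tokens_details?.cached_tokens ?? 0;
  console.log({ cached });
};

main();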
package/dist/index.d.ts
CHANGED
@@ -8,6 +8,12 @@ interface Logger {
     }) => void;
     [key: string]: any;
 }
+interface PriceInfo {
+    total: number;
+    inputCost: number;
+    outputCost: number;
+    currency: string;
+}
 interface ChatGptDefaults {
     model?: string;
     temperature?: number;
@@ -71,7 +77,11 @@ interface CreateChatCompletionOutput {
         prompt_tokens: number;
         completion_tokens: number;
         total_tokens: number;
+        prompt_tokens_details?: {
+            cached_tokens?: number;
+        };
     };
+    price?: PriceInfo;
 }
 
 declare const createChatGptClient: (config: ChatGptConfig) => OpenAI;
@@ -93,5 +103,6 @@ declare const CHAT_GPT_MODELS: {
     readonly GPT_3_5_TURBO: "gpt-3.5-turbo";
 };
 type ChatGptModel = (typeof CHAT_GPT_MODELS)[keyof typeof CHAT_GPT_MODELS];
+declare const isStructuredOutputSupported: (model: string) => boolean;
 
-export { CHAT_GPT_MODELS, type ChatGptAdapter, type ChatGptConfig, type ChatGptContext, type ChatGptDefaults, type ChatGptModel, type ChatMessage, type ChatMessageRole, type CreateChatCompletionInput, type CreateChatCompletionOutput, type Logger, createAdapter, createChatCompletion, createChatGptClient };
+export { CHAT_GPT_MODELS, type ChatGptAdapter, type ChatGptConfig, type ChatGptContext, type ChatGptDefaults, type ChatGptModel, type ChatMessage, type ChatMessageRole, type CreateChatCompletionInput, type CreateChatCompletionOutput, type Logger, type PriceInfo, createAdapter, createChatCompletion, createChatGptClient, isStructuredOutputSupported };
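
isStructuredOutputSupported, declared above, is a plain model-name heuristic; its implementation appears in the compiled bundles below. A few illustrative calls with the results that implementation produces:

import { isStructuredOutputSupported } from '@vitkuz/vitkuz-chat-gpt-apapter';

isStructuredOutputSupported('gpt-4o');                 // true  (exact match)
isStructuredOutputSupported('gpt-4o-mini');            // true  ("gpt-4o-mini" prefix)
isStructuredOutputSupported('gpt-4o-mini-2024-07-18'); // true  ("gpt-4o-mini" prefix)
isStructuredOutputSupported('gpt-4o-2024-08-06');      // true  (snapshot prefix)
isStructuredOutputSupported('gpt-4o-2024-05-13');      // false (earlier snapshot)
isStructuredOutputSupported('gpt-4');                  // false
isStructuredOutputSupported('o3-mini');                // false (not covered by the heuristic)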
package/dist/index.js
CHANGED
@@ -15,6 +15,74 @@ var createChatGptClient = (config) => {
     project: config.project
   });
 };
+
+// src/models.ts
+var CHAT_GPT_MODELS = {
+  GPT_4o: "gpt-4o",
+  GPT_4o_2024_08_06: "gpt-4o-2024-08-06",
+  GPT_4o_MINI: "gpt-4o-mini",
+  GPT_4o_MINI_2024_07_18: "gpt-4o-mini-2024-07-18",
+  GPT_4_TURBO: "gpt-4-turbo",
+  GPT_4: "gpt-4",
+  GPT_3_5_TURBO: "gpt-3.5-turbo"
+};
+var isStructuredOutputSupported = (model) => {
+  if (model.startsWith("gpt-4o-mini")) return true;
+  if (model === "gpt-4o") return true;
+  if (model.startsWith("gpt-4o-2024-08-06")) return true;
+  if (model.includes("2024-08-06")) return true;
+  return false;
+};
+
+// src/pricing.ts
+var CHAT_GPT_PRICING = {
+  // GPT-5 (Standard)
+  "gpt-5.2": { input: 1.75, cachedInput: 0.175, output: 14 },
+  "gpt-5.1": { input: 1.25, cachedInput: 0.125, output: 10 },
+  "gpt-5": { input: 1.25, cachedInput: 0.125, output: 10 },
+  "gpt-5-mini": { input: 0.25, cachedInput: 0.025, output: 2 },
+  "gpt-5-nano": { input: 0.05, cachedInput: 5e-3, output: 0.4 },
+  // GPT-4.1
+  "gpt-4.1": { input: 2, cachedInput: 0.5, output: 8 },
+  "gpt-4.1-mini": { input: 0.4, cachedInput: 0.1, output: 1.6 },
+  "gpt-4.1-nano": { input: 0.1, cachedInput: 0.025, output: 0.4 },
+  // GPT-4o
+  "gpt-4o": { input: 2.5, cachedInput: 1.25, output: 10 },
+  "gpt-4o-2024-05-13": { input: 5, output: 15 },
+  "gpt-4o-2024-08-06": { input: 2.5, cachedInput: 1.25, output: 10 },
+  "gpt-4o-mini": { input: 0.15, cachedInput: 0.075, output: 0.6 },
+  "gpt-4o-mini-2024-07-18": { input: 0.15, cachedInput: 0.075, output: 0.6 },
+  // Reasoning Models
+  o1: { input: 15, cachedInput: 7.5, output: 60 },
+  "o1-mini": { input: 1.1, cachedInput: 0.55, output: 4.4 },
+  o3: { input: 2, cachedInput: 0.5, output: 8 },
+  "o3-mini": { input: 1.1, cachedInput: 0.55, output: 4.4 },
+  "o4-mini": { input: 1.1, cachedInput: 0.275, output: 4.4 },
+  // Legacy/Other
+  "gpt-4-turbo": { input: 10, output: 30 },
+  "gpt-4-turbo-2024-04-09": { input: 10, output: 30 },
+  "gpt-4": { input: 30, output: 60 },
+  "gpt-3.5-turbo": { input: 0.5, output: 1.5 }
+};
+var calculatePrice = (model, usage) => {
+  if (!usage) return void 0;
+  const pricing = CHAT_GPT_PRICING[model];
+  if (!pricing) return void 0;
+  const inputTokens = usage.input_tokens ?? usage.prompt_tokens ?? 0;
+  const outputTokens = usage.output_tokens ?? usage.completion_tokens ?? 0;
+  const cachedTokens = usage.input_tokens_details?.cached_tokens ?? usage.prompt_tokens_details?.cached_tokens ?? 0;
+  const regularInputTokens = inputTokens - cachedTokens;
+  const inputCost = regularInputTokens / 1e6 * pricing.input + cachedTokens / 1e6 * (pricing.cachedInput ?? pricing.input);
+  const outputCost = outputTokens / 1e6 * pricing.output;
+  return {
+    total: Number((inputCost + outputCost).toFixed(6)),
+    inputCost: Number(inputCost.toFixed(6)),
+    outputCost: Number(outputCost.toFixed(6)),
+    currency: "USD"
+  };
+};
+
 // src/operations/create-chat-completion.ts
 var createChatCompletion = (ctx) => async (input) => {
   const { client, logger, defaults } = ctx;
   const mergedInput = {
@@ -27,25 +95,36 @@ var createChatCompletion = (ctx) => async (input) => {
   const { schema, schemaName, messages, ...rest } = mergedInput;
   logger?.debug("chat-gpt:createChatCompletion:start", { data: mergedInput });
   try {
+    let response;
     if (schema) {
-      const response2 = await client.responses.parse({
+      if (!isStructuredOutputSupported(mergedInput.model)) {
+        throw new Error(
+          `chat-gpt:createChatCompletion: model ${mergedInput.model} does not support Structured Outputs. See https://platform.openai.com/docs/guides/structured-outputs#supported-models`
+        );
+      }
+      response = await client.responses.parse({
         ...rest,
         input: messages,
         text: {
           format: zod.zodTextFormat(schema, schemaName || "output")
         }
       });
-      logger?.debug("chat-gpt:createChatCompletion:success", { data: response2 });
-      return {
-        ...response2,
-        parsed: response2.output_parsed
+      response = {
+        ...response,
+        parsed: response.output_parsed
       };
+    } else {
+      response = await client.chat.completions.create({
+        ...mergedInput
+      });
     }
-    const response = await client.chat.completions.create({
-      ...mergedInput
-    });
-    logger?.debug("chat-gpt:createChatCompletion:success", { data: response });
-    return response;
+    const price = calculatePrice(mergedInput.model, response.usage);
+    const output = {
+      ...response,
+      price
+    };
+    logger?.debug("chat-gpt:createChatCompletion:success", { data: output });
+    return output;
   } catch (error) {
     logger?.debug("chat-gpt:createChatCompletion:error", { error });
     throw error;
@@ -61,20 +140,10 @@ var createAdapter = (config, logger) => {
   };
 };
 
-// src/models.ts
-var CHAT_GPT_MODELS = {
-  GPT_4o: "gpt-4o",
-  GPT_4o_2024_08_06: "gpt-4o-2024-08-06",
-  GPT_4o_MINI: "gpt-4o-mini",
-  GPT_4o_MINI_2024_07_18: "gpt-4o-mini-2024-07-18",
-  GPT_4_TURBO: "gpt-4-turbo",
-  GPT_4: "gpt-4",
-  GPT_3_5_TURBO: "gpt-3.5-turbo"
-};
-
 exports.CHAT_GPT_MODELS = CHAT_GPT_MODELS;
 exports.createAdapter = createAdapter;
 exports.createChatCompletion = createChatCompletion;
 exports.createChatGptClient = createChatGptClient;
+exports.isStructuredOutputSupported = isStructuredOutputSupported;
 //# sourceMappingURL=index.js.map
 //# sourceMappingURL=index.js.map
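
calculatePrice stays internal to the bundle (only isStructuredOutputSupported is newly exported), but its arithmetic is easy to check by hand. A standalone sketch that reproduces it for a hypothetical gpt-4o call with 10,000 prompt tokens (2,000 of them cached) and 500 completion tokens; the rates are the per-1M-token entries from CHAT_GPT_PRICING above:

// "gpt-4o" rates, USD per 1M tokens.
const rate = { input: 2.5, cachedInput: 1.25, output: 10 };

const promptTokens = 10_000;
const cachedTokens = 2_000; // usage.prompt_tokens_details.cached_tokens
const completionTokens = 500;

const inputCost =
  ((promptTokens - cachedTokens) / 1e6) * rate.input + // 8,000 tokens -> $0.02
  (cachedTokens / 1e6) * rate.cachedInput;             // 2,000 tokens -> $0.0025
const outputCost = (completionTokens / 1e6) * rate.output; // 500 tokens -> $0.005

// The 1.3.0 bundle would attach this as price:
// { total: 0.0275, inputCost: 0.0225, outputCost: 0.005, currency: "USD" }
console.log({
  total: Number((inputCost + outputCost).toFixed(6)),
  inputCost: Number(inputCost.toFixed(6)),
  outputCost: Number(outputCost.toFixed(6)),
  currency: 'USD',
});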
package/dist/index.js.map
CHANGED
(Single-line source map regenerated for the new bundle: "sources" now lists ../src/models.ts and the new ../src/pricing.ts, and the embedded "sourcesContent" carries the updated TypeScript for models.ts (isStructuredOutputSupported) and pricing.ts (CHAT_GPT_PRICING, calculatePrice) alongside rebuilt "mappings".)

@@ -1 +1 @@
-{"version":3,"sources":["../src/client.ts","../src/operations/create-chat-completion.ts","../src/adapter.ts","../src/models.ts"],"names":["OpenAI","response","zodTextFormat"],"mappings":"…","file":"index.js","sourcesContent":[…]}
+{"version":3,"sources":["../src/client.ts","../src/models.ts","../src/pricing.ts","../src/operations/create-chat-completion.ts","../src/adapter.ts"],"names":["OpenAI","zodTextFormat"],"mappings":"…","file":"index.js","sourcesContent":[…]}
package/dist/index.mjs
CHANGED
@@ -9,6 +9,74 @@ var createChatGptClient = (config) => {
     project: config.project
   });
 };
+
+// src/models.ts
+var CHAT_GPT_MODELS = {
+  GPT_4o: "gpt-4o",
+  GPT_4o_2024_08_06: "gpt-4o-2024-08-06",
+  GPT_4o_MINI: "gpt-4o-mini",
+  GPT_4o_MINI_2024_07_18: "gpt-4o-mini-2024-07-18",
+  GPT_4_TURBO: "gpt-4-turbo",
+  GPT_4: "gpt-4",
+  GPT_3_5_TURBO: "gpt-3.5-turbo"
+};
+var isStructuredOutputSupported = (model) => {
+  if (model.startsWith("gpt-4o-mini")) return true;
+  if (model === "gpt-4o") return true;
+  if (model.startsWith("gpt-4o-2024-08-06")) return true;
+  if (model.includes("2024-08-06")) return true;
+  return false;
+};
+
+// src/pricing.ts
+var CHAT_GPT_PRICING = {
+  // GPT-5 (Standard)
+  "gpt-5.2": { input: 1.75, cachedInput: 0.175, output: 14 },
+  "gpt-5.1": { input: 1.25, cachedInput: 0.125, output: 10 },
+  "gpt-5": { input: 1.25, cachedInput: 0.125, output: 10 },
+  "gpt-5-mini": { input: 0.25, cachedInput: 0.025, output: 2 },
+  "gpt-5-nano": { input: 0.05, cachedInput: 5e-3, output: 0.4 },
+  // GPT-4.1
+  "gpt-4.1": { input: 2, cachedInput: 0.5, output: 8 },
+  "gpt-4.1-mini": { input: 0.4, cachedInput: 0.1, output: 1.6 },
+  "gpt-4.1-nano": { input: 0.1, cachedInput: 0.025, output: 0.4 },
+  // GPT-4o
+  "gpt-4o": { input: 2.5, cachedInput: 1.25, output: 10 },
+  "gpt-4o-2024-05-13": { input: 5, output: 15 },
+  "gpt-4o-2024-08-06": { input: 2.5, cachedInput: 1.25, output: 10 },
+  "gpt-4o-mini": { input: 0.15, cachedInput: 0.075, output: 0.6 },
+  "gpt-4o-mini-2024-07-18": { input: 0.15, cachedInput: 0.075, output: 0.6 },
+  // Reasoning Models
+  o1: { input: 15, cachedInput: 7.5, output: 60 },
+  "o1-mini": { input: 1.1, cachedInput: 0.55, output: 4.4 },
+  o3: { input: 2, cachedInput: 0.5, output: 8 },
+  "o3-mini": { input: 1.1, cachedInput: 0.55, output: 4.4 },
+  "o4-mini": { input: 1.1, cachedInput: 0.275, output: 4.4 },
+  // Legacy/Other
+  "gpt-4-turbo": { input: 10, output: 30 },
+  "gpt-4-turbo-2024-04-09": { input: 10, output: 30 },
+  "gpt-4": { input: 30, output: 60 },
+  "gpt-3.5-turbo": { input: 0.5, output: 1.5 }
+};
+var calculatePrice = (model, usage) => {
+  if (!usage) return void 0;
+  const pricing = CHAT_GPT_PRICING[model];
+  if (!pricing) return void 0;
+  const inputTokens = usage.input_tokens ?? usage.prompt_tokens ?? 0;
+  const outputTokens = usage.output_tokens ?? usage.completion_tokens ?? 0;
+  const cachedTokens = usage.input_tokens_details?.cached_tokens ?? usage.prompt_tokens_details?.cached_tokens ?? 0;
+  const regularInputTokens = inputTokens - cachedTokens;
+  const inputCost = regularInputTokens / 1e6 * pricing.input + cachedTokens / 1e6 * (pricing.cachedInput ?? pricing.input);
+  const outputCost = outputTokens / 1e6 * pricing.output;
+  return {
+    total: Number((inputCost + outputCost).toFixed(6)),
+    inputCost: Number(inputCost.toFixed(6)),
+    outputCost: Number(outputCost.toFixed(6)),
+    currency: "USD"
+  };
+};
+
 // src/operations/create-chat-completion.ts
 var createChatCompletion = (ctx) => async (input) => {
   const { client, logger, defaults } = ctx;
   const mergedInput = {
@@ -21,25 +89,36 @@ var createChatCompletion = (ctx) => async (input) => {
   const { schema, schemaName, messages, ...rest } = mergedInput;
   logger?.debug("chat-gpt:createChatCompletion:start", { data: mergedInput });
   try {
+    let response;
     if (schema) {
-      const response2 = await client.responses.parse({
+      if (!isStructuredOutputSupported(mergedInput.model)) {
+        throw new Error(
+          `chat-gpt:createChatCompletion: model ${mergedInput.model} does not support Structured Outputs. See https://platform.openai.com/docs/guides/structured-outputs#supported-models`
+        );
+      }
+      response = await client.responses.parse({
         ...rest,
         input: messages,
         text: {
           format: zodTextFormat(schema, schemaName || "output")
         }
       });
-      logger?.debug("chat-gpt:createChatCompletion:success", { data: response2 });
-      return {
-        ...response2,
-        parsed: response2.output_parsed
+      response = {
+        ...response,
+        parsed: response.output_parsed
      };
+    } else {
+      response = await client.chat.completions.create({
+        ...mergedInput
+      });
     }
-    const response = await client.chat.completions.create({
-      ...mergedInput
-    });
-    logger?.debug("chat-gpt:createChatCompletion:success", { data: response });
-    return response;
+    const price = calculatePrice(mergedInput.model, response.usage);
+    const output = {
+      ...response,
+      price
+    };
+    logger?.debug("chat-gpt:createChatCompletion:success", { data: output });
+    return output;
   } catch (error) {
     logger?.debug("chat-gpt:createChatCompletion:error", { error });
     throw error;
@@ -55,17 +134,6 @@ var createAdapter = (config, logger) => {
   };
 };
 
-// src/models.ts
-var CHAT_GPT_MODELS = {
-  GPT_4o: "gpt-4o",
-  GPT_4o_2024_08_06: "gpt-4o-2024-08-06",
-  GPT_4o_MINI: "gpt-4o-mini",
-  GPT_4o_MINI_2024_07_18: "gpt-4o-mini-2024-07-18",
-  GPT_4_TURBO: "gpt-4-turbo",
-  GPT_4: "gpt-4",
-  GPT_3_5_TURBO: "gpt-3.5-turbo"
-};
-
-export { CHAT_GPT_MODELS, createAdapter, createChatCompletion, createChatGptClient };
+export { CHAT_GPT_MODELS, createAdapter, createChatCompletion, createChatGptClient, isStructuredOutputSupported };
 //# sourceMappingURL=index.mjs.map
 //# sourceMappingURL=index.mjs.map
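
The ESM bundle wires the same guard into the schema path: passing a zod schema now fails fast with the "does not support Structured Outputs" error above on models the heuristic rejects, while successful calls come back with parsed and, when pricing is known, price. A usage sketch under assumptions this diff does not pin down (the zod dependency, top-level await in an ESM context, and input fields beyond those visible in the compiled source):

import { z } from 'zod';
import { createAdapter, CHAT_GPT_MODELS, isStructuredOutputSupported } from '@vitkuz/vitkuz-chat-gpt-apapter';

const adapter = createAdapter({ apiKey: process.env.OPENAI_API_KEY! });

const Answer = z.object({ city: z.string(), country: z.string() });
const model = CHAT_GPT_MODELS.GPT_4o_2024_08_06;

// Optional pre-flight check; createChatCompletion runs the same check internally
// and throws for models the heuristic rejects.
if (!isStructuredOutputSupported(model)) {
  throw new Error(`pick a structured-output model instead of ${model}`);
}

const result = await adapter.createChatCompletion({
  model,
  messages: [{ role: 'user', content: 'Where is the Eiffel Tower?' }],
  schema: Answer,
  schemaName: 'answer',
});

// parsed is attached at runtime in the schema branch (its typing is not shown in this diff).
console.log((result as any).parsed);
console.log(result.price?.total); // USD total, when the model has a pricing entry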
package/dist/index.mjs.map
CHANGED
(Single-line source map regenerated for the new bundle: "sources" now lists ../src/models.ts and the new ../src/pricing.ts, and the embedded "sourcesContent" carries the updated TypeScript for models.ts (isStructuredOutputSupported) and pricing.ts (CHAT_GPT_PRICING, calculatePrice) alongside rebuilt "mappings".)

@@ -1 +1 @@
-{"version":3,"sources":["../src/client.ts","../src/operations/create-chat-completion.ts","../src/adapter.ts","../src/models.ts"],"names":["response"],"mappings":"…","file":"index.mjs","sourcesContent":[…]}
+{"version":3,"sources":["../src/client.ts","../src/models.ts","../src/pricing.ts","../src/operations/create-chat-completion.ts","../src/adapter.ts"],"names":[],"mappings":"…","file":"index.mjs","sourcesContent":[…]}