@kjerneverk/execution-openai 1.0.7 → 1.0.8-dev.20260212012315.eb02292
This diff shows the changes between publicly released versions of this package as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the respective public registry.
- package/dist/index.d.ts +44 -0
- package/dist/index.js +156 -7
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.d.ts
CHANGED
|
@@ -41,6 +41,10 @@ declare class OpenAIProvider implements Provider {
|
|
|
41
41
|
* Execute a request against OpenAI
|
|
42
42
|
*/
|
|
43
43
|
execute(request: Request_2, options?: ExecutionOptions): Promise<ProviderResponse>;
|
|
44
|
+
/**
|
|
45
|
+
* Execute a request with streaming response
|
|
46
|
+
*/
|
|
47
|
+
executeStream(request: Request_2, options?: ExecutionOptions): AsyncIterable<StreamChunk>;
|
|
44
48
|
}
|
|
45
49
|
export { OpenAIProvider }
|
|
46
50
|
export default OpenAIProvider;
|
|
@@ -48,6 +52,7 @@ export default OpenAIProvider;
|
|
|
48
52
|
export declare interface Provider {
|
|
49
53
|
readonly name: string;
|
|
50
54
|
execute(request: Request_2, options?: ExecutionOptions): Promise<ProviderResponse>;
|
|
55
|
+
executeStream?(request: Request_2, options?: ExecutionOptions): AsyncIterable<StreamChunk>;
|
|
51
56
|
supportsModel?(model: Model): boolean;
|
|
52
57
|
}
|
|
53
58
|
|
|
@@ -73,10 +78,49 @@ declare interface Request_2 {
|
|
|
73
78
|
model: Model;
|
|
74
79
|
responseFormat?: any;
|
|
75
80
|
validator?: any;
|
|
81
|
+
tools?: ToolDefinition[];
|
|
76
82
|
addMessage(message: Message): void;
|
|
77
83
|
}
|
|
78
84
|
export { Request_2 as Request }
|
|
79
85
|
|
|
86
|
+
export declare interface StreamChunk {
|
|
87
|
+
type: StreamChunkType;
|
|
88
|
+
text?: string;
|
|
89
|
+
toolCall?: {
|
|
90
|
+
id?: string;
|
|
91
|
+
index?: number;
|
|
92
|
+
name?: string;
|
|
93
|
+
argumentsDelta?: string;
|
|
94
|
+
};
|
|
95
|
+
usage?: {
|
|
96
|
+
inputTokens: number;
|
|
97
|
+
outputTokens: number;
|
|
98
|
+
};
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
export declare type StreamChunkType = 'text' | 'tool_call_start' | 'tool_call_delta' | 'tool_call_end' | 'usage' | 'done';
|
|
102
|
+
|
|
103
|
+
export declare interface ToolDefinition {
|
|
104
|
+
name: string;
|
|
105
|
+
description: string;
|
|
106
|
+
parameters: ToolParameterSchema;
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
export declare interface ToolParameterSchema {
|
|
110
|
+
type: 'object';
|
|
111
|
+
properties: Record<string, {
|
|
112
|
+
type: string;
|
|
113
|
+
description?: string;
|
|
114
|
+
enum?: string[];
|
|
115
|
+
items?: {
|
|
116
|
+
type: string;
|
|
117
|
+
};
|
|
118
|
+
default?: any;
|
|
119
|
+
}>;
|
|
120
|
+
required?: string[];
|
|
121
|
+
additionalProperties?: boolean;
|
|
122
|
+
}
|
|
123
|
+
|
|
80
124
|
/**
|
|
81
125
|
* Package version
|
|
82
126
|
*/
|
package/dist/index.js
CHANGED
|
@@ -23725,19 +23725,45 @@ class OpenAIProvider {
|
|
|
23725
23725
|
const client2 = new OpenAI(clientOptions);
|
|
23726
23726
|
const model = options.model || request2.model || "gpt-4";
|
|
23727
23727
|
const messages = request2.messages.map((msg) => {
|
|
23728
|
-
|
|
23729
|
-
|
|
23730
|
-
|
|
23731
|
-
|
|
23732
|
-
|
|
23733
|
-
|
|
23728
|
+
if (msg.role === "tool") {
|
|
23729
|
+
return {
|
|
23730
|
+
role: "tool",
|
|
23731
|
+
content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content),
|
|
23732
|
+
tool_call_id: msg.tool_call_id || ""
|
|
23733
|
+
};
|
|
23734
|
+
} else if (msg.role === "assistant" && msg.tool_calls) {
|
|
23735
|
+
return {
|
|
23736
|
+
role: "assistant",
|
|
23737
|
+
content: msg.content,
|
|
23738
|
+
tool_calls: msg.tool_calls
|
|
23739
|
+
};
|
|
23740
|
+
} else {
|
|
23741
|
+
const role = msg.role === "developer" ? "system" : msg.role;
|
|
23742
|
+
return {
|
|
23743
|
+
role,
|
|
23744
|
+
content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content),
|
|
23745
|
+
name: msg.name
|
|
23746
|
+
};
|
|
23747
|
+
}
|
|
23734
23748
|
});
|
|
23749
|
+
let openaiTools;
|
|
23750
|
+
if (request2.tools && request2.tools.length > 0) {
|
|
23751
|
+
openaiTools = request2.tools.map((tool) => ({
|
|
23752
|
+
type: "function",
|
|
23753
|
+
function: {
|
|
23754
|
+
name: tool.name,
|
|
23755
|
+
description: tool.description,
|
|
23756
|
+
parameters: tool.parameters
|
|
23757
|
+
}
|
|
23758
|
+
}));
|
|
23759
|
+
}
|
|
23735
23760
|
const response2 = await client2.chat.completions.create({
|
|
23736
23761
|
model,
|
|
23737
23762
|
messages,
|
|
23738
23763
|
temperature: options.temperature,
|
|
23739
23764
|
max_tokens: options.maxTokens,
|
|
23740
|
-
response_format: request2.responseFormat
|
|
23765
|
+
response_format: request2.responseFormat,
|
|
23766
|
+
...openaiTools ? { tools: openaiTools } : {}
|
|
23741
23767
|
});
|
|
23742
23768
|
const choice = response2.choices[0];
|
|
23743
23769
|
return {
|
|
@@ -23760,6 +23786,129 @@ class OpenAIProvider {
|
|
|
23760
23786
|
throw createSafeError(error, { provider: "openai" });
|
|
23761
23787
|
}
|
|
23762
23788
|
}
|
|
23789
|
+
/**
|
|
23790
|
+
* Execute a request with streaming response
|
|
23791
|
+
*/
|
|
23792
|
+
async *executeStream(request2, options = {}) {
|
|
23793
|
+
const apiKey = options.apiKey || process.env.OPENAI_API_KEY;
|
|
23794
|
+
if (!apiKey) {
|
|
23795
|
+
throw new Error("OpenAI API key is required. Set OPENAI_API_KEY environment variable.");
|
|
23796
|
+
}
|
|
23797
|
+
const validation = redactor.validateKey(apiKey, "openai");
|
|
23798
|
+
if (!validation.valid) {
|
|
23799
|
+
throw new Error("Invalid OpenAI API key format");
|
|
23800
|
+
}
|
|
23801
|
+
try {
|
|
23802
|
+
const client2 = new OpenAI({ apiKey });
|
|
23803
|
+
const model = options.model || request2.model || "gpt-4";
|
|
23804
|
+
const messages = request2.messages.map((msg) => {
|
|
23805
|
+
if (msg.role === "tool") {
|
|
23806
|
+
return {
|
|
23807
|
+
role: "tool",
|
|
23808
|
+
content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content),
|
|
23809
|
+
tool_call_id: msg.tool_call_id || ""
|
|
23810
|
+
};
|
|
23811
|
+
} else if (msg.role === "assistant" && msg.tool_calls) {
|
|
23812
|
+
return {
|
|
23813
|
+
role: "assistant",
|
|
23814
|
+
content: msg.content,
|
|
23815
|
+
tool_calls: msg.tool_calls
|
|
23816
|
+
};
|
|
23817
|
+
} else {
|
|
23818
|
+
const role = msg.role === "developer" ? "system" : msg.role;
|
|
23819
|
+
return {
|
|
23820
|
+
role,
|
|
23821
|
+
content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content),
|
|
23822
|
+
name: msg.name
|
|
23823
|
+
};
|
|
23824
|
+
}
|
|
23825
|
+
});
|
|
23826
|
+
let openaiTools;
|
|
23827
|
+
if (request2.tools && request2.tools.length > 0) {
|
|
23828
|
+
openaiTools = request2.tools.map((tool) => ({
|
|
23829
|
+
type: "function",
|
|
23830
|
+
function: {
|
|
23831
|
+
name: tool.name,
|
|
23832
|
+
description: tool.description,
|
|
23833
|
+
parameters: tool.parameters
|
|
23834
|
+
}
|
|
23835
|
+
}));
|
|
23836
|
+
}
|
|
23837
|
+
const stream = await client2.chat.completions.create({
|
|
23838
|
+
model,
|
|
23839
|
+
messages,
|
|
23840
|
+
temperature: options.temperature,
|
|
23841
|
+
max_tokens: options.maxTokens,
|
|
23842
|
+
stream: true,
|
|
23843
|
+
stream_options: { include_usage: true },
|
|
23844
|
+
...openaiTools ? { tools: openaiTools } : {}
|
|
23845
|
+
});
|
|
23846
|
+
const toolCallsInProgress = /* @__PURE__ */ new Map();
|
|
23847
|
+
for await (const chunk of stream) {
|
|
23848
|
+
const delta = chunk.choices[0]?.delta;
|
|
23849
|
+
if (delta?.content) {
|
|
23850
|
+
yield { type: "text", text: delta.content };
|
|
23851
|
+
}
|
|
23852
|
+
if (delta?.tool_calls) {
|
|
23853
|
+
for (const tc of delta.tool_calls) {
|
|
23854
|
+
const index = tc.index;
|
|
23855
|
+
if (tc.id) {
|
|
23856
|
+
toolCallsInProgress.set(index, {
|
|
23857
|
+
id: tc.id,
|
|
23858
|
+
name: tc.function?.name || "",
|
|
23859
|
+
arguments: ""
|
|
23860
|
+
});
|
|
23861
|
+
yield {
|
|
23862
|
+
type: "tool_call_start",
|
|
23863
|
+
toolCall: {
|
|
23864
|
+
id: tc.id,
|
|
23865
|
+
index,
|
|
23866
|
+
name: tc.function?.name
|
|
23867
|
+
}
|
|
23868
|
+
};
|
|
23869
|
+
}
|
|
23870
|
+
if (tc.function?.arguments) {
|
|
23871
|
+
const toolCall = toolCallsInProgress.get(index);
|
|
23872
|
+
if (toolCall) {
|
|
23873
|
+
toolCall.arguments += tc.function.arguments;
|
|
23874
|
+
yield {
|
|
23875
|
+
type: "tool_call_delta",
|
|
23876
|
+
toolCall: {
|
|
23877
|
+
index,
|
|
23878
|
+
argumentsDelta: tc.function.arguments
|
|
23879
|
+
}
|
|
23880
|
+
};
|
|
23881
|
+
}
|
|
23882
|
+
}
|
|
23883
|
+
}
|
|
23884
|
+
}
|
|
23885
|
+
if (chunk.choices[0]?.finish_reason === "tool_calls") {
|
|
23886
|
+
for (const [index, toolCall] of toolCallsInProgress) {
|
|
23887
|
+
yield {
|
|
23888
|
+
type: "tool_call_end",
|
|
23889
|
+
toolCall: {
|
|
23890
|
+
id: toolCall.id,
|
|
23891
|
+
index,
|
|
23892
|
+
name: toolCall.name
|
|
23893
|
+
}
|
|
23894
|
+
};
|
|
23895
|
+
}
|
|
23896
|
+
}
|
|
23897
|
+
if (chunk.usage) {
|
|
23898
|
+
yield {
|
|
23899
|
+
type: "usage",
|
|
23900
|
+
usage: {
|
|
23901
|
+
inputTokens: chunk.usage.prompt_tokens,
|
|
23902
|
+
outputTokens: chunk.usage.completion_tokens
|
|
23903
|
+
}
|
|
23904
|
+
};
|
|
23905
|
+
}
|
|
23906
|
+
}
|
|
23907
|
+
yield { type: "done" };
|
|
23908
|
+
} catch (error) {
|
|
23909
|
+
throw createSafeError(error, { provider: "openai" });
|
|
23910
|
+
}
|
|
23911
|
+
}
|
|
23763
23912
|
}
|
|
23764
23913
|
function createOpenAIProvider() {
|
|
23765
23914
|
return new OpenAIProvider();
|