@openrouter/sdk 0.1.18 → 0.1.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,77 @@
+ import { OpenRouterCore } from "../core.js";
+ import { RequestOptions } from "../lib/sdks.js";
+ import { ResponseWrapper } from "../lib/response-wrapper.js";
+ import * as models from "../models/index.js";
+ import { EnhancedTool, MaxToolRounds } from "../lib/tool-types.js";
+ /**
+  * Get a response with multiple consumption patterns
+  *
+  * @remarks
+  * Creates a response using the OpenResponses API in streaming mode and returns
+  * a wrapper that allows consuming the response in multiple ways:
+  *
+  * - `await response.getMessage()` - Get the completed message (tools auto-executed)
+  * - `await response.getText()` - Get just the text content (tools auto-executed)
+  * - `for await (const delta of response.getTextStream())` - Stream text deltas
+  * - `for await (const delta of response.getReasoningStream())` - Stream reasoning deltas
+  * - `for await (const event of response.getToolStream())` - Stream tool events (incl. preliminary results)
+  * - `for await (const toolCall of response.getToolCallsStream())` - Stream structured tool calls
+  * - `await response.getToolCalls()` - Get all tool calls from completed response
+  * - `for await (const msg of response.getNewMessagesStream())` - Stream incremental message updates
+  * - `for await (const event of response.getFullResponsesStream())` - Stream all events (incl. tool preliminary)
+  * - `for await (const event of response.getFullChatStream())` - Stream in chat format (incl. tool preliminary)
+  *
+  * All consumption patterns can be used concurrently on the same response.
+  *
+  * @example
+  * ```typescript
+  * import { z } from 'zod';
+  *
+  * // Simple text extraction
+  * const response = openrouter.callModel({
+  *   model: "openai/gpt-4",
+  *   input: "Hello!"
+  * });
+  * const text = await response.getText();
+  * console.log(text);
+  *
+  * // With tools (automatic execution)
+  * const response = openrouter.callModel({
+  *   model: "openai/gpt-4",
+  *   input: "What's the weather in SF?",
+  *   tools: [{
+  *     type: "function",
+  *     function: {
+  *       name: "get_weather",
+  *       description: "Get current weather",
+  *       inputSchema: z.object({
+  *         location: z.string()
+  *       }),
+  *       outputSchema: z.object({
+  *         temperature: z.number(),
+  *         description: z.string()
+  *       }),
+  *       execute: async (params) => {
+  *         return { temperature: 72, description: "Sunny" };
+  *       }
+  *     }
+  *   }],
+  *   maxToolRounds: 5, // or function: (context: TurnContext) => boolean
+  * });
+  * const message = await response.getMessage(); // Tools auto-executed!
+  *
+  * // Stream with preliminary results
+  * for await (const event of response.getFullChatStream()) {
+  *   if (event.type === "content.delta") {
+  *     process.stdout.write(event.delta);
+  *   } else if (event.type === "tool.preliminary_result") {
+  *     console.log("Tool progress:", event.result);
+  *   }
+  * }
+  * ```
+  */
+ export declare function callModel(client: OpenRouterCore, request: Omit<models.OpenResponsesRequest, "stream" | "tools"> & {
+     tools?: EnhancedTool[] | models.OpenResponsesRequest["tools"];
+     maxToolRounds?: MaxToolRounds;
+ }, options?: RequestOptions): ResponseWrapper;
+ //# sourceMappingURL=callModel.d.ts.map
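
The declaration above returns a `ResponseWrapper` synchronously, so the same response object can be read through several of the listed patterns at once. A minimal sketch of that concurrent use, assuming an already-constructed SDK client bound as `openrouter` (as in the package's own JSDoc example) and using only methods named in the declaration:

```typescript
// Sketch only: `openrouter` stands in for an SDK client constructed elsewhere;
// client construction is not part of this diff.
const response = openrouter.callModel({
  model: "openai/gpt-4",
  input: "Summarize the OpenResponses API in one paragraph.",
});

// Stream deltas to stdout while, in parallel, waiting for the full text.
// The wrapper documents that consumption patterns may be used concurrently.
const streaming = (async () => {
  for await (const delta of response.getTextStream()) {
    process.stdout.write(delta);
  }
})();
const fullTextPromise = response.getText();

await streaming;
const fullText = await fullTextPromise;
console.log("\ncharacters received:", fullText.length);
```
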
@@ -0,0 +1,100 @@
+ import { ResponseWrapper } from "../lib/response-wrapper.js";
+ import { convertEnhancedToolsToAPIFormat } from "../lib/tool-executor.js";
+ /**
+  * Get a response with multiple consumption patterns
+  *
+  * @remarks
+  * Creates a response using the OpenResponses API in streaming mode and returns
+  * a wrapper that allows consuming the response in multiple ways:
+  *
+  * - `await response.getMessage()` - Get the completed message (tools auto-executed)
+  * - `await response.getText()` - Get just the text content (tools auto-executed)
+  * - `for await (const delta of response.getTextStream())` - Stream text deltas
+  * - `for await (const delta of response.getReasoningStream())` - Stream reasoning deltas
+  * - `for await (const event of response.getToolStream())` - Stream tool events (incl. preliminary results)
+  * - `for await (const toolCall of response.getToolCallsStream())` - Stream structured tool calls
+  * - `await response.getToolCalls()` - Get all tool calls from completed response
+  * - `for await (const msg of response.getNewMessagesStream())` - Stream incremental message updates
+  * - `for await (const event of response.getFullResponsesStream())` - Stream all events (incl. tool preliminary)
+  * - `for await (const event of response.getFullChatStream())` - Stream in chat format (incl. tool preliminary)
+  *
+  * All consumption patterns can be used concurrently on the same response.
+  *
+  * @example
+  * ```typescript
+  * import { z } from 'zod';
+  *
+  * // Simple text extraction
+  * const response = openrouter.callModel({
+  *   model: "openai/gpt-4",
+  *   input: "Hello!"
+  * });
+  * const text = await response.getText();
+  * console.log(text);
+  *
+  * // With tools (automatic execution)
+  * const response = openrouter.callModel({
+  *   model: "openai/gpt-4",
+  *   input: "What's the weather in SF?",
+  *   tools: [{
+  *     type: "function",
+  *     function: {
+  *       name: "get_weather",
+  *       description: "Get current weather",
+  *       inputSchema: z.object({
+  *         location: z.string()
+  *       }),
+  *       outputSchema: z.object({
+  *         temperature: z.number(),
+  *         description: z.string()
+  *       }),
+  *       execute: async (params) => {
+  *         return { temperature: 72, description: "Sunny" };
+  *       }
+  *     }
+  *   }],
+  *   maxToolRounds: 5, // or function: (context: TurnContext) => boolean
+  * });
+  * const message = await response.getMessage(); // Tools auto-executed!
+  *
+  * // Stream with preliminary results
+  * for await (const event of response.getFullChatStream()) {
+  *   if (event.type === "content.delta") {
+  *     process.stdout.write(event.delta);
+  *   } else if (event.type === "tool.preliminary_result") {
+  *     console.log("Tool progress:", event.result);
+  *   }
+  * }
+  * ```
+  */
+ export function callModel(client, request, options) {
+     const { tools, maxToolRounds, ...apiRequest } = request;
+     // Separate enhanced tools from API tools
+     let isEnhancedTools = false;
+     if (tools && tools.length > 0) {
+         const firstTool = tools[0];
+         isEnhancedTools = "function" in firstTool && firstTool.function && "inputSchema" in firstTool.function;
+     }
+     const enhancedTools = isEnhancedTools ? tools : undefined;
+     // Convert enhanced tools to API format if provided, otherwise use tools as-is
+     const apiTools = enhancedTools ? convertEnhancedToolsToAPIFormat(enhancedTools) : tools;
+     // Build the request with converted tools
+     const finalRequest = {
+         ...apiRequest,
+         ...(apiTools && { tools: apiTools }),
+     };
+     const wrapperOptions = {
+         client,
+         request: finalRequest,
+         options: options ?? {},
+     };
+     // Only pass enhanced tools to wrapper (needed for auto-execution)
+     if (enhancedTools) {
+         wrapperOptions.tools = enhancedTools;
+     }
+     if (maxToolRounds !== undefined) {
+         wrapperOptions.maxToolRounds = maxToolRounds;
+     }
+     return new ResponseWrapper(wrapperOptions);
+ }
+ //# sourceMappingURL=callModel.js.map
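
Worth noting from the implementation above: only the first tool is inspected, and a tool counts as "enhanced" when its `function` object carries an `inputSchema`; enhanced tools are converted with `convertEnhancedToolsToAPIFormat` and auto-executed by the wrapper, while plain API-format tools pass through unchanged. A small illustrative sketch of that detection rule (the `looksEnhanced` helper and the sample tool shapes are hypothetical, not SDK exports):

```typescript
// Illustrative only: mirrors the heuristic in callModel above; not exported by the SDK.
function looksEnhanced(tools: ReadonlyArray<unknown> | undefined): boolean {
  if (!tools || tools.length === 0) return false;
  // Only the first tool is inspected, exactly as in the shipped code.
  const firstTool = tools[0] as { function?: { inputSchema?: unknown } };
  return firstTool.function !== undefined && "inputSchema" in firstTool.function;
}

// Treated as enhanced: schema-bearing tool with an execute callback (shape abbreviated).
const enhancedStyle = [
  { type: "function", function: { name: "get_weather", inputSchema: {}, execute: async () => ({}) } },
];

// Treated as already being in API format and forwarded as-is (shape illustrative).
const apiStyle = [
  { type: "function", function: { name: "get_weather", parameters: { type: "object" } } },
];

console.log(looksEnhanced(enhancedStyle)); // true  -> converted and auto-executed
console.log(looksEnhanced(apiStyle));      // false -> passed through unchanged
```
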
package/esm/lib/config.d.ts CHANGED
@@ -45,8 +45,8 @@ export declare function serverURLFromOptions(options: SDKOptions): URL | null;
  export declare const SDK_METADATA: {
      readonly language: "typescript";
      readonly openapiDocVersion: "1.0.0";
-     readonly sdkVersion: "0.1.18";
+     readonly sdkVersion: "0.1.23";
      readonly genVersion: "2.755.9";
-     readonly userAgent: "speakeasy-sdk/typescript 0.1.18 2.755.9 1.0.0 @openrouter/sdk";
+     readonly userAgent: "speakeasy-sdk/typescript 0.1.23 2.755.9 1.0.0 @openrouter/sdk";
  };
  //# sourceMappingURL=config.d.ts.map
package/esm/lib/config.js CHANGED
@@ -25,8 +25,8 @@ export function serverURLFromOptions(options) {
  export const SDK_METADATA = {
      language: "typescript",
      openapiDocVersion: "1.0.0",
-     sdkVersion: "0.1.18",
+     sdkVersion: "0.1.23",
      genVersion: "2.755.9",
-     userAgent: "speakeasy-sdk/typescript 0.1.18 2.755.9 1.0.0 @openrouter/sdk",
+     userAgent: "speakeasy-sdk/typescript 0.1.23 2.755.9 1.0.0 @openrouter/sdk",
  };
  //# sourceMappingURL=config.js.map
@@ -0,0 +1,116 @@
+ import { OpenRouterCore } from "../core.js";
+ import { RequestOptions } from "./sdks.js";
+ import * as models from "../models/index.js";
+ import { EnhancedTool, ParsedToolCall, MaxToolRounds, EnhancedResponseStreamEvent, ToolStreamEvent, ChatStreamEvent } from "./tool-types.js";
+ export interface GetResponseOptions {
+     request: models.OpenResponsesRequest;
+     client: OpenRouterCore;
+     options?: RequestOptions;
+     tools?: EnhancedTool[];
+     maxToolRounds?: MaxToolRounds;
+ }
+ /**
+  * A wrapper around a streaming response that provides multiple consumption patterns.
+  *
+  * Allows consuming the response in multiple ways:
+  * - `await response.getMessage()` - Get the completed message
+  * - `await response.getText()` - Get just the text
+  * - `for await (const delta of response.getTextStream())` - Stream text deltas
+  * - `for await (const msg of response.getNewMessagesStream())` - Stream incremental message updates
+  * - `for await (const event of response.getFullResponsesStream())` - Stream all response events
+  *
+  * All consumption patterns can be used concurrently thanks to the underlying
+  * ReusableReadableStream implementation.
+  */
+ export declare class ResponseWrapper {
+     private reusableStream;
+     private streamPromise;
+     private messagePromise;
+     private textPromise;
+     private options;
+     private initPromise;
+     private toolExecutionPromise;
+     private finalResponse;
+     private preliminaryResults;
+     private allToolExecutionRounds;
+     constructor(options: GetResponseOptions);
+     /**
+      * Initialize the stream if not already started
+      * This is idempotent - multiple calls will return the same promise
+      */
+     private initStream;
+     /**
+      * Execute tools automatically if they are provided and have execute functions
+      * This is idempotent - multiple calls will return the same promise
+      */
+     private executeToolsIfNeeded;
+     /**
+      * Get the completed message from the response.
+      * This will consume the stream until completion, execute any tools, and extract the first message.
+      * Returns an AssistantMessage in chat format.
+      */
+     getMessage(): Promise<models.AssistantMessage>;
+     /**
+      * Get just the text content from the response.
+      * This will consume the stream until completion, execute any tools, and extract the text.
+      */
+     getText(): Promise<string>;
+     /**
+      * Stream all response events as they arrive.
+      * Multiple consumers can iterate over this stream concurrently.
+      * Includes preliminary tool result events after tool execution.
+      */
+     getFullResponsesStream(): AsyncIterableIterator<EnhancedResponseStreamEvent>;
+     /**
+      * Stream only text deltas as they arrive.
+      * This filters the full event stream to only yield text content.
+      */
+     getTextStream(): AsyncIterableIterator<string>;
+     /**
+      * Stream incremental message updates as content is added.
+      * Each iteration yields an updated version of the message with new content.
+      * Also yields ToolResponseMessages after tool execution completes.
+      * Returns AssistantMessage or ToolResponseMessage in chat format.
+      */
+     getNewMessagesStream(): AsyncIterableIterator<models.AssistantMessage | models.ToolResponseMessage>;
+     /**
+      * Stream only reasoning deltas as they arrive.
+      * This filters the full event stream to only yield reasoning content.
+      */
+     getReasoningStream(): AsyncIterableIterator<string>;
+     /**
+      * Stream tool call argument deltas and preliminary results.
+      * This filters the full event stream to yield:
+      * - Tool call argument deltas as { type: "delta", content: string }
+      * - Preliminary results as { type: "preliminary_result", toolCallId, result }
+      */
+     getToolStream(): AsyncIterableIterator<ToolStreamEvent>;
+     /**
+      * Stream events in chat format (compatibility layer).
+      * Note: This transforms responses API events into a chat-like format.
+      * Includes preliminary tool result events after tool execution.
+      *
+      * @remarks
+      * This is a compatibility method that attempts to transform the responses API
+      * stream into a format similar to the chat API. Due to differences in the APIs,
+      * this may not be a perfect mapping.
+      */
+     getFullChatStream(): AsyncIterableIterator<ChatStreamEvent>;
+     /**
+      * Get all tool calls from the completed response (before auto-execution).
+      * Note: If tools have execute functions, they will be automatically executed
+      * and this will return the tool calls from the initial response.
+      * Returns structured tool calls with parsed arguments.
+      */
+     getToolCalls(): Promise<ParsedToolCall[]>;
+     /**
+      * Stream structured tool call objects as they're completed.
+      * Each iteration yields a complete tool call with parsed arguments.
+      */
+     getToolCallsStream(): AsyncIterableIterator<ParsedToolCall>;
+     /**
+      * Cancel the underlying stream and all consumers
+      */
+     cancel(): Promise<void>;
+ }
+ //# sourceMappingURL=response-wrapper.d.ts.map
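
To close the loop on the wrapper declaration above, a brief tool-focused consumption sketch; `openrouter` and `getWeatherTool` are hypothetical stand-ins (an SDK client and an `EnhancedTool` with an `execute` function), and the event shapes follow the `getToolStream` doc comment:

```typescript
// Sketch only: `openrouter` and `getWeatherTool` are assumptions, not SDK exports;
// only methods declared on ResponseWrapper above are used.
const response = openrouter.callModel({
  model: "openai/gpt-4",
  input: "What's the weather in SF?",
  tools: [getWeatherTool], // hypothetical EnhancedTool with Zod schemas and an execute callback
  maxToolRounds: 3,
});

// Observe tool activity as it happens...
for await (const event of response.getToolStream()) {
  if (event.type === "delta") {
    process.stdout.write(event.content); // tool-call argument JSON streaming in
  } else if (event.type === "preliminary_result") {
    console.log("\npreliminary:", event.toolCallId, event.result);
  }
}

// ...then read the structured calls and the final assistant message.
const toolCalls = await response.getToolCalls(); // ParsedToolCall[] with parsed arguments
const message = await response.getMessage();     // AssistantMessage after auto-execution
console.log(toolCalls.length, message);
```
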