@yourgpt/llm-sdk 1.1.0 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/dist/adapters/index.d.mts +23 -9
  2. package/dist/adapters/index.d.ts +23 -9
  3. package/dist/adapters/index.js.map +1 -1
  4. package/dist/adapters/index.mjs.map +1 -1
  5. package/dist/{base-D_FyHFKj.d.mts → base-CXNMfvXg.d.mts} +10 -2
  6. package/dist/{base-D_FyHFKj.d.ts → base-CXNMfvXg.d.ts} +10 -2
  7. package/dist/index.d.mts +33 -13
  8. package/dist/index.d.ts +33 -13
  9. package/dist/index.js.map +1 -1
  10. package/dist/index.mjs.map +1 -1
  11. package/dist/providers/anthropic/index.d.mts +2 -2
  12. package/dist/providers/anthropic/index.d.ts +2 -2
  13. package/dist/providers/anthropic/index.js.map +1 -1
  14. package/dist/providers/anthropic/index.mjs.map +1 -1
  15. package/dist/providers/azure/index.d.mts +2 -2
  16. package/dist/providers/azure/index.d.ts +2 -2
  17. package/dist/providers/azure/index.js.map +1 -1
  18. package/dist/providers/azure/index.mjs.map +1 -1
  19. package/dist/providers/google/index.d.mts +2 -2
  20. package/dist/providers/google/index.d.ts +2 -2
  21. package/dist/providers/google/index.js.map +1 -1
  22. package/dist/providers/google/index.mjs.map +1 -1
  23. package/dist/providers/ollama/index.d.mts +2 -2
  24. package/dist/providers/ollama/index.d.ts +2 -2
  25. package/dist/providers/ollama/index.js.map +1 -1
  26. package/dist/providers/ollama/index.mjs.map +1 -1
  27. package/dist/providers/openai/index.d.mts +2 -2
  28. package/dist/providers/openai/index.d.ts +2 -2
  29. package/dist/providers/openai/index.js.map +1 -1
  30. package/dist/providers/openai/index.mjs.map +1 -1
  31. package/dist/providers/xai/index.d.mts +2 -2
  32. package/dist/providers/xai/index.d.ts +2 -2
  33. package/dist/providers/xai/index.js.map +1 -1
  34. package/dist/providers/xai/index.mjs.map +1 -1
  35. package/dist/{types-BBCZ3Fxy.d.mts → types-B8rxpnYi.d.mts} +1 -1
  36. package/dist/{types-DcoCaVVC.d.ts → types-CrQftISG.d.ts} +1 -1
  37. package/package.json +2 -2
@@ -1,5 +1,13 @@
1
- import { Message, ActionDefinition, LLMConfig, StreamEvent, MessageAttachment } from '@yourgpt/copilot-sdk/core';
1
+ import { Message, ActionDefinition, StreamEvent, LLMConfig, MessageAttachment } from '@yourgpt/copilot-sdk/core';
2
2
 
3
+ /**
4
+ * Request-level LLM configuration overrides
5
+ */
6
+ interface RequestLLMConfig {
7
+ model?: string;
8
+ temperature?: number;
9
+ maxTokens?: number;
10
+ }
3
11
  /**
4
12
  * Chat completion request
5
13
  */
@@ -17,7 +25,7 @@ interface ChatCompletionRequest {
17
25
  /** System prompt */
18
26
  systemPrompt?: string;
19
27
  /** LLM configuration overrides */
20
- config?: Partial<LLMConfig>;
28
+ config?: RequestLLMConfig;
21
29
  /** Abort signal for cancellation */
22
30
  signal?: AbortSignal;
23
31
  }
@@ -1,5 +1,13 @@
1
- import { Message, ActionDefinition, LLMConfig, StreamEvent, MessageAttachment } from '@yourgpt/copilot-sdk/core';
1
+ import { Message, ActionDefinition, StreamEvent, LLMConfig, MessageAttachment } from '@yourgpt/copilot-sdk/core';
2
2
 
3
+ /**
4
+ * Request-level LLM configuration overrides
5
+ */
6
+ interface RequestLLMConfig {
7
+ model?: string;
8
+ temperature?: number;
9
+ maxTokens?: number;
10
+ }
3
11
  /**
4
12
  * Chat completion request
5
13
  */
@@ -17,7 +25,7 @@ interface ChatCompletionRequest {
17
25
  /** System prompt */
18
26
  systemPrompt?: string;
19
27
  /** LLM configuration overrides */
20
- config?: Partial<LLMConfig>;
28
+ config?: RequestLLMConfig;
21
29
  /** Abort signal for cancellation */
22
30
  signal?: AbortSignal;
23
31
  }
package/dist/index.d.mts CHANGED
@@ -5,12 +5,12 @@ export { createOpenAI, openai } from './providers/openai/index.mjs';
5
5
  export { anthropic, createAnthropic } from './providers/anthropic/index.mjs';
6
6
  export { createXAI, xai } from './providers/xai/index.mjs';
7
7
  export { createGoogle, google } from './providers/google/index.mjs';
8
- import { LLMConfig, ActionDefinition, ToolDefinition, AgentLoopConfig, KnowledgeBaseConfig, DoneEventMessage, StreamEvent, Message, AIProvider as AIProvider$1, ToolResponse } from '@yourgpt/copilot-sdk/core';
9
- export { ActionDefinition, AgentLoopConfig, LLMConfig, LLMProvider, Message, StreamEvent, ToolDefinition, ToolExecution, ToolLocation, ToolResponse, UnifiedToolCall, UnifiedToolResult } from '@yourgpt/copilot-sdk/core';
10
- import { A as AIProvider, P as ProviderCapabilities, d as ProviderFormatter } from './types-BBCZ3Fxy.mjs';
11
- export { c as AnthropicProviderConfig, e as AnthropicTool, g as AnthropicToolResult, f as AnthropicToolUse, a as AzureProviderConfig, B as BaseProviderConfig, l as GeminiFunctionCall, k as GeminiFunctionDeclaration, m as GeminiFunctionResponse, G as GoogleProviderConfig, O as OllamaProviderConfig, b as OpenAIProviderConfig, h as OpenAITool, i as OpenAIToolCall, j as OpenAIToolResult, X as XAIProviderConfig } from './types-BBCZ3Fxy.mjs';
12
- import { L as LLMAdapter } from './base-D_FyHFKj.mjs';
13
- export { A as AdapterFactory, C as ChatCompletionRequest } from './base-D_FyHFKj.mjs';
8
+ import { ActionDefinition, ToolDefinition, AgentLoopConfig, KnowledgeBaseConfig, DoneEventMessage, StreamEvent, Message, AIProvider as AIProvider$1, ToolResponse } from '@yourgpt/copilot-sdk/core';
9
+ export { ActionDefinition, AgentLoopConfig, LLMConfig, Message, StreamEvent, ToolDefinition, ToolExecution, ToolLocation, ToolResponse, UnifiedToolCall, UnifiedToolResult } from '@yourgpt/copilot-sdk/core';
10
+ import { A as AIProvider, P as ProviderCapabilities, d as ProviderFormatter } from './types-B8rxpnYi.mjs';
11
+ export { c as AnthropicProviderConfig, e as AnthropicTool, g as AnthropicToolResult, f as AnthropicToolUse, a as AzureProviderConfig, B as BaseProviderConfig, l as GeminiFunctionCall, k as GeminiFunctionDeclaration, m as GeminiFunctionResponse, G as GoogleProviderConfig, O as OllamaProviderConfig, b as OpenAIProviderConfig, h as OpenAITool, i as OpenAIToolCall, j as OpenAIToolResult, X as XAIProviderConfig } from './types-B8rxpnYi.mjs';
12
+ import { L as LLMAdapter } from './base-CXNMfvXg.mjs';
13
+ export { A as AdapterFactory, C as ChatCompletionRequest } from './base-CXNMfvXg.mjs';
14
14
  import * as hono from 'hono';
15
15
  import { Hono } from 'hono';
16
16
  export { AnthropicAdapter, AnthropicAdapterConfig, AzureAdapter, AzureAdapterConfig, GoogleAdapter, GoogleAdapterConfig, OllamaAdapter, OllamaAdapterConfig, OpenAIAdapter, OpenAIAdapterConfig, XAIAdapter, XAIAdapterConfig, createAnthropicAdapter, createAzureAdapter, createGoogleAdapter, createOllamaAdapter, createOpenAIAdapter, createXAIAdapter } from './adapters/index.mjs';
@@ -177,14 +177,33 @@ declare function formatToolsForGoogle(tools: Record<string, Tool>): Array<{
177
177
  }>;
178
178
  }>;
179
179
 
180
+ /**
181
+ * LLM provider type for server-side configuration
182
+ */
183
+ type LLMProvider = "openai" | "anthropic" | "google" | "ollama" | "xai" | "azure";
184
+ /**
185
+ * Server-side LLM configuration (complete config for runtime)
186
+ */
187
+ interface ServerLLMConfig {
188
+ /** LLM provider */
189
+ provider: LLMProvider;
190
+ /** API key for the provider */
191
+ apiKey: string;
192
+ /** Model name */
193
+ model?: string;
194
+ /** Base URL for custom/self-hosted models */
195
+ baseUrl?: string;
196
+ /** Temperature (0-2) */
197
+ temperature?: number;
198
+ /** Maximum tokens in response */
199
+ maxTokens?: number;
200
+ }
180
201
  /**
181
202
  * Runtime configuration with LLM config
182
203
  */
183
204
  interface RuntimeConfigWithLLM {
184
205
  /** LLM configuration */
185
- llm: LLMConfig & {
186
- apiKey: string;
187
- };
206
+ llm: ServerLLMConfig;
188
207
  /** Custom LLM adapter (overrides llm config) */
189
208
  adapter?: LLMAdapter;
190
209
  /** System prompt */
@@ -220,9 +239,7 @@ interface RuntimeConfigWithAdapter {
220
239
  /** Custom LLM adapter */
221
240
  adapter: LLMAdapter;
222
241
  /** LLM configuration (optional when adapter provided) */
223
- llm?: LLMConfig & {
224
- apiKey: string;
225
- };
242
+ llm?: ServerLLMConfig;
226
243
  /** System prompt */
227
244
  systemPrompt?: string;
228
245
  /** Available actions (legacy) */
@@ -312,7 +329,10 @@ interface ChatRequest {
312
329
  /** Bot ID (for cloud) */
313
330
  botId?: string;
314
331
  /** LLM config overrides */
315
- config?: Partial<LLMConfig>;
332
+ config?: {
333
+ temperature?: number;
334
+ maxTokens?: number;
335
+ };
316
336
  /** System prompt override */
317
337
  systemPrompt?: string;
318
338
  /** Actions from client (legacy) */
package/dist/index.d.ts CHANGED
@@ -5,12 +5,12 @@ export { createOpenAI, openai } from './providers/openai/index.js';
5
5
  export { anthropic, createAnthropic } from './providers/anthropic/index.js';
6
6
  export { createXAI, xai } from './providers/xai/index.js';
7
7
  export { createGoogle, google } from './providers/google/index.js';
8
- import { LLMConfig, ActionDefinition, ToolDefinition, AgentLoopConfig, KnowledgeBaseConfig, DoneEventMessage, StreamEvent, Message, AIProvider as AIProvider$1, ToolResponse } from '@yourgpt/copilot-sdk/core';
9
- export { ActionDefinition, AgentLoopConfig, LLMConfig, LLMProvider, Message, StreamEvent, ToolDefinition, ToolExecution, ToolLocation, ToolResponse, UnifiedToolCall, UnifiedToolResult } from '@yourgpt/copilot-sdk/core';
10
- import { A as AIProvider, P as ProviderCapabilities, d as ProviderFormatter } from './types-DcoCaVVC.js';
11
- export { c as AnthropicProviderConfig, e as AnthropicTool, g as AnthropicToolResult, f as AnthropicToolUse, a as AzureProviderConfig, B as BaseProviderConfig, l as GeminiFunctionCall, k as GeminiFunctionDeclaration, m as GeminiFunctionResponse, G as GoogleProviderConfig, O as OllamaProviderConfig, b as OpenAIProviderConfig, h as OpenAITool, i as OpenAIToolCall, j as OpenAIToolResult, X as XAIProviderConfig } from './types-DcoCaVVC.js';
12
- import { L as LLMAdapter } from './base-D_FyHFKj.js';
13
- export { A as AdapterFactory, C as ChatCompletionRequest } from './base-D_FyHFKj.js';
8
+ import { ActionDefinition, ToolDefinition, AgentLoopConfig, KnowledgeBaseConfig, DoneEventMessage, StreamEvent, Message, AIProvider as AIProvider$1, ToolResponse } from '@yourgpt/copilot-sdk/core';
9
+ export { ActionDefinition, AgentLoopConfig, LLMConfig, Message, StreamEvent, ToolDefinition, ToolExecution, ToolLocation, ToolResponse, UnifiedToolCall, UnifiedToolResult } from '@yourgpt/copilot-sdk/core';
10
+ import { A as AIProvider, P as ProviderCapabilities, d as ProviderFormatter } from './types-CrQftISG.js';
11
+ export { c as AnthropicProviderConfig, e as AnthropicTool, g as AnthropicToolResult, f as AnthropicToolUse, a as AzureProviderConfig, B as BaseProviderConfig, l as GeminiFunctionCall, k as GeminiFunctionDeclaration, m as GeminiFunctionResponse, G as GoogleProviderConfig, O as OllamaProviderConfig, b as OpenAIProviderConfig, h as OpenAITool, i as OpenAIToolCall, j as OpenAIToolResult, X as XAIProviderConfig } from './types-CrQftISG.js';
12
+ import { L as LLMAdapter } from './base-CXNMfvXg.js';
13
+ export { A as AdapterFactory, C as ChatCompletionRequest } from './base-CXNMfvXg.js';
14
14
  import * as hono from 'hono';
15
15
  import { Hono } from 'hono';
16
16
  export { AnthropicAdapter, AnthropicAdapterConfig, AzureAdapter, AzureAdapterConfig, GoogleAdapter, GoogleAdapterConfig, OllamaAdapter, OllamaAdapterConfig, OpenAIAdapter, OpenAIAdapterConfig, XAIAdapter, XAIAdapterConfig, createAnthropicAdapter, createAzureAdapter, createGoogleAdapter, createOllamaAdapter, createOpenAIAdapter, createXAIAdapter } from './adapters/index.js';
@@ -177,14 +177,33 @@ declare function formatToolsForGoogle(tools: Record<string, Tool>): Array<{
177
177
  }>;
178
178
  }>;
179
179
 
180
+ /**
181
+ * LLM provider type for server-side configuration
182
+ */
183
+ type LLMProvider = "openai" | "anthropic" | "google" | "ollama" | "xai" | "azure";
184
+ /**
185
+ * Server-side LLM configuration (complete config for runtime)
186
+ */
187
+ interface ServerLLMConfig {
188
+ /** LLM provider */
189
+ provider: LLMProvider;
190
+ /** API key for the provider */
191
+ apiKey: string;
192
+ /** Model name */
193
+ model?: string;
194
+ /** Base URL for custom/self-hosted models */
195
+ baseUrl?: string;
196
+ /** Temperature (0-2) */
197
+ temperature?: number;
198
+ /** Maximum tokens in response */
199
+ maxTokens?: number;
200
+ }
180
201
  /**
181
202
  * Runtime configuration with LLM config
182
203
  */
183
204
  interface RuntimeConfigWithLLM {
184
205
  /** LLM configuration */
185
- llm: LLMConfig & {
186
- apiKey: string;
187
- };
206
+ llm: ServerLLMConfig;
188
207
  /** Custom LLM adapter (overrides llm config) */
189
208
  adapter?: LLMAdapter;
190
209
  /** System prompt */
@@ -220,9 +239,7 @@ interface RuntimeConfigWithAdapter {
220
239
  /** Custom LLM adapter */
221
240
  adapter: LLMAdapter;
222
241
  /** LLM configuration (optional when adapter provided) */
223
- llm?: LLMConfig & {
224
- apiKey: string;
225
- };
242
+ llm?: ServerLLMConfig;
226
243
  /** System prompt */
227
244
  systemPrompt?: string;
228
245
  /** Available actions (legacy) */
@@ -312,7 +329,10 @@ interface ChatRequest {
312
329
  /** Bot ID (for cloud) */
313
330
  botId?: string;
314
331
  /** LLM config overrides */
315
- config?: Partial<LLMConfig>;
332
+ config?: {
333
+ temperature?: number;
334
+ maxTokens?: number;
335
+ };
316
336
  /** System prompt override */
317
337
  systemPrompt?: string;
318
338
  /** Actions from client (legacy) */