agent0-js 0.0.6 → 0.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -98,6 +98,24 @@ interface ModelOverrides {
98
98
  maxOutputTokens?: number; // Override max output tokens
99
99
  temperature?: number; // Override temperature
100
100
  maxStepCount?: number; // Override max step count
101
+ providerOptions?: ProviderOptions; // Provider-specific reasoning options
102
+ }
103
+
104
+ interface ProviderOptions {
105
+ openai?: {
106
+ reasoningEffort?: 'minimal' | 'low' | 'medium' | 'high';
107
+ reasoningSummary?: 'auto' | 'detailed';
108
+ };
109
+ xai?: {
110
+ reasoningEffort?: 'low' | 'medium' | 'high';
111
+ };
112
+ google?: {
113
+ thinkingConfig?: {
114
+ thinkingBudget?: number;
115
+ thinkingLevel?: 'low' | 'medium' | 'high';
116
+ includeThoughts?: boolean;
117
+ };
118
+ };
101
119
  }
102
120
  ```
103
121
 
@@ -278,6 +296,79 @@ async function runWithFallback(agentId: string, variables: Record<string, string
278
296
  }
279
297
  ```
280
298
 
299
+ ### Provider Options
300
+
301
+ The `providerOptions` option allows you to configure provider-specific reasoning and thinking behavior. Different providers have different options:
302
+
303
+ **OpenAI / Azure** - Use `reasoningEffort` to control how much reasoning the model does, and `reasoningSummary` to control how detailed the returned summary of the reasoning process is:
304
+
305
+ ```typescript
306
+ const response = await client.generate({
307
+ agentId: 'agent_123',
308
+ overrides: {
309
+ providerOptions: {
310
+ openai: {
311
+ reasoningEffort: 'high', // 'minimal' | 'low' | 'medium' | 'high'
312
+ reasoningSummary: 'auto' // 'auto' | 'detailed' - controls reasoning output
313
+ }
314
+ }
315
+ }
316
+ });
317
+ ```
318
+
319
+ - `reasoningSummary: 'auto'` - Returns a condensed summary of the reasoning process
320
+ - `reasoningSummary: 'detailed'` - Returns a more comprehensive summary of the reasoning process
321
+ - When enabled, reasoning summaries appear in the stream as events with type `'reasoning'` and in non-streaming responses within the `reasoning` field
322
+
323
+ **xAI (Grok)** - Use `reasoningEffort` to control reasoning:
324
+
325
+ ```typescript
326
+ const response = await client.generate({
327
+ agentId: 'agent_123',
328
+ overrides: {
329
+ providerOptions: {
330
+ xai: {
331
+ reasoningEffort: 'high' // 'low' | 'medium' | 'high'
332
+ }
333
+ }
334
+ }
335
+ });
336
+ ```
337
+
338
+ **Google Generative AI / Google Vertex** - Use `thinkingConfig` to control thinking (use either `thinkingLevel` or `thinkingBudget`, not both):
339
+
340
+ ```typescript
341
+ // Using thinkingLevel (recommended for most cases)
342
+ const response = await client.generate({
343
+ agentId: 'agent_123',
344
+ overrides: {
345
+ providerOptions: {
346
+ google: {
347
+ thinkingConfig: {
348
+ thinkingLevel: 'high', // 'low' | 'medium' | 'high'
349
+ includeThoughts: true // Include thinking in response
350
+ }
351
+ }
352
+ }
353
+ }
354
+ });
355
+
356
+ // OR using thinkingBudget (for fine-grained control)
357
+ const response = await client.generate({
358
+ agentId: 'agent_123',
359
+ overrides: {
360
+ providerOptions: {
361
+ google: {
362
+ thinkingConfig: {
363
+ thinkingBudget: 8192, // Number of thinking tokens
364
+ includeThoughts: true
365
+ }
366
+ }
367
+ }
368
+ }
369
+ });
370
+ ```
371
+
281
372
  ### Extra Messages
282
373
 
283
374
  The `extraMessages` option allows you to programmatically append additional messages to the agent's prompt. These messages are used as-is without any variable substitution, making them ideal for:
package/dist/types.d.ts CHANGED
@@ -1,8 +1,23 @@
1
+ import type { GoogleGenerativeAIProviderOptions } from "@ai-sdk/google";
2
+ import type { OpenAIResponsesProviderOptions } from "@ai-sdk/openai";
3
+ import type { XaiProviderOptions } from "@ai-sdk/xai";
1
4
  import type { ModelMessage } from "ai";
2
5
  export interface Agent0Config {
3
6
  apiKey: string;
4
7
  baseUrl?: string;
5
8
  }
9
+ /**
10
+ * Provider-specific options for reasoning/thinking configuration.
11
+ * Each provider has its own format for controlling reasoning behavior.
12
+ */
13
+ export interface ProviderOptions {
14
+ /** OpenAI reasoning effort options */
15
+ openai?: OpenAIResponsesProviderOptions;
16
+ /** xAI reasoning effort options */
17
+ xai?: XaiProviderOptions;
18
+ /** Google/Vertex thinking configuration */
19
+ google?: GoogleGenerativeAIProviderOptions;
20
+ }
6
21
  /**
7
22
  * Model configuration overrides for runtime customization.
8
23
  * Allows downstream applications to implement load balancing, fallbacks, and dynamic model switching.
@@ -19,6 +34,8 @@ export interface ModelOverrides {
19
34
  temperature?: number;
20
35
  /** Override max step count */
21
36
  maxStepCount?: number;
37
+ /** Provider-specific options for reasoning/thinking configuration */
38
+ providerOptions?: ProviderOptions;
22
39
  }
23
40
  export interface RunOptions {
24
41
  agentId: string;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "agent0-js",
3
- "version": "0.0.6",
3
+ "version": "0.0.7",
4
4
  "description": "TypeScript SDK for Agent0",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",
@@ -17,6 +17,9 @@
17
17
  "typescript": "^5.0.0"
18
18
  },
19
19
  "dependencies": {
20
+ "@ai-sdk/google": "3.0.0-beta.69",
21
+ "@ai-sdk/openai": "3.0.0-beta.90",
22
+ "@ai-sdk/xai": "3.0.0-beta.54",
20
23
  "ai": "^5.0.98"
21
24
  },
22
25
  "scripts": {
package/src/types.ts CHANGED
@@ -1,8 +1,24 @@
1
+ import type { GoogleGenerativeAIProviderOptions } from "@ai-sdk/google";
2
+ import type { OpenAIResponsesProviderOptions } from "@ai-sdk/openai";
3
+ import type { XaiProviderOptions } from "@ai-sdk/xai";
1
4
  import type { ModelMessage } from "ai";
2
5
 
3
6
  export interface Agent0Config {
4
- apiKey: string;
5
- baseUrl?: string;
7
+ apiKey: string;
8
+ baseUrl?: string;
9
+ }
10
+
11
+ /**
12
+ * Provider-specific options for reasoning/thinking configuration.
13
+ * Each provider has its own format for controlling reasoning behavior.
14
+ */
15
+ export interface ProviderOptions {
16
+ /** OpenAI reasoning effort options */
17
+ openai?: OpenAIResponsesProviderOptions;
18
+ /** xAI reasoning effort options */
19
+ xai?: XaiProviderOptions;
20
+ /** Google/Vertex thinking configuration */
21
+ google?: GoogleGenerativeAIProviderOptions;
6
22
  }
7
23
 
8
24
  /**
@@ -10,29 +26,31 @@ export interface Agent0Config {
10
26
  * Allows downstream applications to implement load balancing, fallbacks, and dynamic model switching.
11
27
  */
12
28
  export interface ModelOverrides {
13
- /** Override the model provider and name */
14
- model?: {
15
- provider_id?: string;
16
- name?: string;
17
- };
18
- /** Override max output tokens */
19
- maxOutputTokens?: number;
20
- /** Override temperature */
21
- temperature?: number;
22
- /** Override max step count */
23
- maxStepCount?: number;
29
+ /** Override the model provider and name */
30
+ model?: {
31
+ provider_id?: string;
32
+ name?: string;
33
+ };
34
+ /** Override max output tokens */
35
+ maxOutputTokens?: number;
36
+ /** Override temperature */
37
+ temperature?: number;
38
+ /** Override max step count */
39
+ maxStepCount?: number;
40
+ /** Provider-specific options for reasoning/thinking configuration */
41
+ providerOptions?: ProviderOptions;
24
42
  }
25
43
 
26
44
  export interface RunOptions {
27
- agentId: string;
28
- variables?: Record<string, string>;
29
- /** Runtime model overrides for load balancing, fallbacks, etc. */
30
- overrides?: ModelOverrides;
31
- /** Extra messages to append to the agent's prompt (used as-is, no variable substitution) */
32
- extraMessages?: ModelMessage[];
45
+ agentId: string;
46
+ variables?: Record<string, string>;
47
+ /** Runtime model overrides for load balancing, fallbacks, etc. */
48
+ overrides?: ModelOverrides;
49
+ /** Extra messages to append to the agent's prompt (used as-is, no variable substitution) */
50
+ extraMessages?: ModelMessage[];
33
51
  }
34
52
 
35
53
  export interface GenerateResponse {
36
- messages: ModelMessage[];
37
- text: string;
54
+ messages: ModelMessage[];
55
+ text: string;
38
56
  }