art-framework 0.3.4 → 0.3.6

This diff reflects the changes between publicly available package versions as published to their respective public registries, and is provided for informational purposes only.
package/dist/index.d.cts CHANGED
@@ -4126,10 +4126,24 @@ declare class GeminiAdapter implements ProviderAdapter {
  *
  * Handles both streaming and non-streaming requests based on `options.stream`.
  *
+ * Thinking tokens (Gemini):
+ * - On supported Gemini models (e.g., `gemini-2.5-*`), you can enable thought output via `config.thinkingConfig`.
+ * - This adapter reads provider-specific flags from the call options:
+ *   - `options.gemini.thinking.includeThoughts: boolean` — when `true`, requests thought (reasoning) output.
+ *   - `options.gemini.thinking.thinkingBudget?: number` — optional token budget for thinking.
+ * - When enabled and supported, the adapter will attempt to differentiate thought vs response parts and set
+ *   `StreamEvent.tokenType` accordingly:
+ *   - For planning calls (`callContext === 'AGENT_THOUGHT'`): `AGENT_THOUGHT_LLM_THINKING` or `AGENT_THOUGHT_LLM_RESPONSE`.
+ *   - For synthesis calls (`callContext === 'FINAL_SYNTHESIS'`): `FINAL_SYNTHESIS_LLM_THINKING` or `FINAL_SYNTHESIS_LLM_RESPONSE`.
+ * - `LLMMetadata.thinkingTokens` will be populated if the provider reports separate thinking token usage.
+ * - If the SDK/model does not expose thought parts, the adapter falls back to labeling tokens as `...LLM_RESPONSE`.
+ *
  * @param {ArtStandardPrompt} prompt - The standardized prompt messages.
  * @param {CallOptions} options - Options for the LLM call, including streaming preference, model override, and execution context.
  * @returns {Promise<AsyncIterable<StreamEvent>>} An async iterable that yields `StreamEvent` objects.
  * - `TOKEN`: Contains a chunk of the response text. `tokenType` indicates if it's part of agent thought or final synthesis.
+ *   When Gemini thinking is enabled and available, `tokenType` may be one of the `...LLM_THINKING` or
+ *   `...LLM_RESPONSE` variants to separate thought vs response tokens.
  * - `METADATA`: Contains information like stop reason, token counts, and timing, yielded once at the end.
  * - `ERROR`: Contains any error encountered during translation, SDK call, or response processing.
  * - `END`: Signals the completion of the stream.
@@ -4138,6 +4152,23 @@ declare class GeminiAdapter implements ProviderAdapter {
  * @see {StreamEvent}
  * @see {LLMMetadata}
  * @see https://ai.google.dev/api/rest/v1beta/models/generateContent
+ *
+ * @example
+ * // Enable Gemini thinking (if supported by the selected model)
+ * const stream = await geminiAdapter.call(prompt, {
+ *   threadId,
+ *   stream: true,
+ *   callContext: 'FINAL_SYNTHESIS',
+ *   providerConfig, // your RuntimeProviderConfig
+ *   gemini: {
+ *     thinking: { includeThoughts: true, thinkingBudget: 8096 }
+ *   }
+ * });
+ * for await (const evt of stream) {
+ *   if (evt.type === 'TOKEN') {
+ *     // evt.tokenType may be FINAL_SYNTHESIS_LLM_THINKING or FINAL_SYNTHESIS_LLM_RESPONSE
+ *   }
+ * }
  */
  call(prompt: ArtStandardPrompt, options: CallOptions): Promise<AsyncIterable<StreamEvent>>;
  /**
@@ -5205,6 +5236,6 @@ declare const generateUUID: () => string;
  /**
  * The current version of the ART Framework package.
  */
- declare const VERSION = "0.3.3";
+ declare const VERSION = "0.3.5";

  export { type A2AAgentInfo, type A2ATask, type A2ATaskEvent, type A2ATaskFilter, type A2ATaskMetadata, A2ATaskPriority, type A2ATaskResult, A2ATaskSocket, A2ATaskStatus, ARTError, AdapterInstantiationError, type AgentDiscoveryConfig, AgentDiscoveryService, type AgentFinalResponse, type AgentOptions, type AgentPersona, type AgentProps, type AgentState, AnthropicAdapter, type AnthropicAdapterOptions, ApiKeyStrategy, ApiQueueTimeoutError, type ArtInstance, type ArtInstanceConfig, type ArtStandardMessage, type ArtStandardMessageRole, ArtStandardMessageSchema, type ArtStandardPrompt, ArtStandardPromptSchema, AuthManager, type AvailableProviderEntry, CalculatorTool, type CallOptions, type ConversationManager, type ConversationMessage, ConversationSocket, type CreateA2ATaskRequest, DeepSeekAdapter, type DeepSeekAdapterOptions, ErrorCode, type ExecutionContext, type ExecutionMetadata, type FilterOptions, type FormattedPrompt, GeminiAdapter, type GeminiAdapterOptions, GenericOAuthStrategy, type IA2ATaskRepository, type IAgentCore, type IAuthStrategy, type IConversationRepository, type IObservationRepository, type IProviderManager, type IStateRepository, type IToolExecutor, type ITypedSocket, InMemoryStorageAdapter, IndexedDBStorageAdapter, type JsonObjectSchema, type JsonSchema, type LLMMetadata, LLMStreamSocket, LocalInstanceBusyError, LocalProviderConflictError, LogLevel, Logger, type LoggerConfig, type ManagedAdapterAccessor, McpClientController, McpManager, type McpManagerConfig, McpProxyTool, type McpResource, type McpResourceTemplate, type McpServerConfig, type McpServerStatus, type McpToolDefinition, type MessageOptions, MessageRole, ModelCapability, type OAuthConfig, type Observation, type ObservationFilter, type ObservationManager, ObservationSocket, ObservationType, OllamaAdapter, type OllamaAdapterOptions, OpenAIAdapter, type OpenAIAdapterOptions, OpenRouterAdapter, type OpenRouterAdapterOptions, type OutputParser, PESAgent, type PKCEOAuthConfig, PKCEOAuthStrategy, type ParsedToolCall, type PromptBlueprint, type PromptContext, type PromptManager, type ProviderAdapter, type ProviderManagerConfig, ProviderManagerImpl, type ReasoningEngine, type RuntimeProviderConfig, type StageSpecificPrompts, StateManager, type StateSavingStrategy, type StorageAdapter, type StreamEvent, type StreamEventTypeFilter, SupabaseStorageAdapter, type SystemPromptMergeStrategy, type SystemPromptOverride, type SystemPromptResolver, type SystemPromptSpec, type SystemPromptsRegistry, type TaskDelegationConfig, TaskDelegationService, type TaskStatusResponse, type ThreadConfig, type ThreadContext, ToolRegistry, type ToolResult, type ToolSchema, type ToolSystem, TypedSocket, UISystem, UnknownProviderError, type UnsubscribeFunction, type UpdateA2ATaskRequest, VERSION, type ZyntopiaOAuthConfig, ZyntopiaOAuthStrategy, createArtInstance, generateUUID };
package/dist/index.d.ts CHANGED
@@ -4126,10 +4126,24 @@ declare class GeminiAdapter implements ProviderAdapter {
  *
  * Handles both streaming and non-streaming requests based on `options.stream`.
  *
+ * Thinking tokens (Gemini):
+ * - On supported Gemini models (e.g., `gemini-2.5-*`), you can enable thought output via `config.thinkingConfig`.
+ * - This adapter reads provider-specific flags from the call options:
+ *   - `options.gemini.thinking.includeThoughts: boolean` — when `true`, requests thought (reasoning) output.
+ *   - `options.gemini.thinking.thinkingBudget?: number` — optional token budget for thinking.
+ * - When enabled and supported, the adapter will attempt to differentiate thought vs response parts and set
+ *   `StreamEvent.tokenType` accordingly:
+ *   - For planning calls (`callContext === 'AGENT_THOUGHT'`): `AGENT_THOUGHT_LLM_THINKING` or `AGENT_THOUGHT_LLM_RESPONSE`.
+ *   - For synthesis calls (`callContext === 'FINAL_SYNTHESIS'`): `FINAL_SYNTHESIS_LLM_THINKING` or `FINAL_SYNTHESIS_LLM_RESPONSE`.
+ * - `LLMMetadata.thinkingTokens` will be populated if the provider reports separate thinking token usage.
+ * - If the SDK/model does not expose thought parts, the adapter falls back to labeling tokens as `...LLM_RESPONSE`.
+ *
  * @param {ArtStandardPrompt} prompt - The standardized prompt messages.
  * @param {CallOptions} options - Options for the LLM call, including streaming preference, model override, and execution context.
  * @returns {Promise<AsyncIterable<StreamEvent>>} An async iterable that yields `StreamEvent` objects.
  * - `TOKEN`: Contains a chunk of the response text. `tokenType` indicates if it's part of agent thought or final synthesis.
+ *   When Gemini thinking is enabled and available, `tokenType` may be one of the `...LLM_THINKING` or
+ *   `...LLM_RESPONSE` variants to separate thought vs response tokens.
  * - `METADATA`: Contains information like stop reason, token counts, and timing, yielded once at the end.
  * - `ERROR`: Contains any error encountered during translation, SDK call, or response processing.
  * - `END`: Signals the completion of the stream.
@@ -4138,6 +4152,23 @@ declare class GeminiAdapter implements ProviderAdapter {
  * @see {StreamEvent}
  * @see {LLMMetadata}
  * @see https://ai.google.dev/api/rest/v1beta/models/generateContent
+ *
+ * @example
+ * // Enable Gemini thinking (if supported by the selected model)
+ * const stream = await geminiAdapter.call(prompt, {
+ *   threadId,
+ *   stream: true,
+ *   callContext: 'FINAL_SYNTHESIS',
+ *   providerConfig, // your RuntimeProviderConfig
+ *   gemini: {
+ *     thinking: { includeThoughts: true, thinkingBudget: 8096 }
+ *   }
+ * });
+ * for await (const evt of stream) {
+ *   if (evt.type === 'TOKEN') {
+ *     // evt.tokenType may be FINAL_SYNTHESIS_LLM_THINKING or FINAL_SYNTHESIS_LLM_RESPONSE
+ *   }
+ * }
  */
  call(prompt: ArtStandardPrompt, options: CallOptions): Promise<AsyncIterable<StreamEvent>>;
  /**
@@ -5205,6 +5236,6 @@ declare const generateUUID: () => string;
  /**
  * The current version of the ART Framework package.
  */
- declare const VERSION = "0.3.3";
+ declare const VERSION = "0.3.5";

  export { type A2AAgentInfo, type A2ATask, type A2ATaskEvent, type A2ATaskFilter, type A2ATaskMetadata, A2ATaskPriority, type A2ATaskResult, A2ATaskSocket, A2ATaskStatus, ARTError, AdapterInstantiationError, type AgentDiscoveryConfig, AgentDiscoveryService, type AgentFinalResponse, type AgentOptions, type AgentPersona, type AgentProps, type AgentState, AnthropicAdapter, type AnthropicAdapterOptions, ApiKeyStrategy, ApiQueueTimeoutError, type ArtInstance, type ArtInstanceConfig, type ArtStandardMessage, type ArtStandardMessageRole, ArtStandardMessageSchema, type ArtStandardPrompt, ArtStandardPromptSchema, AuthManager, type AvailableProviderEntry, CalculatorTool, type CallOptions, type ConversationManager, type ConversationMessage, ConversationSocket, type CreateA2ATaskRequest, DeepSeekAdapter, type DeepSeekAdapterOptions, ErrorCode, type ExecutionContext, type ExecutionMetadata, type FilterOptions, type FormattedPrompt, GeminiAdapter, type GeminiAdapterOptions, GenericOAuthStrategy, type IA2ATaskRepository, type IAgentCore, type IAuthStrategy, type IConversationRepository, type IObservationRepository, type IProviderManager, type IStateRepository, type IToolExecutor, type ITypedSocket, InMemoryStorageAdapter, IndexedDBStorageAdapter, type JsonObjectSchema, type JsonSchema, type LLMMetadata, LLMStreamSocket, LocalInstanceBusyError, LocalProviderConflictError, LogLevel, Logger, type LoggerConfig, type ManagedAdapterAccessor, McpClientController, McpManager, type McpManagerConfig, McpProxyTool, type McpResource, type McpResourceTemplate, type McpServerConfig, type McpServerStatus, type McpToolDefinition, type MessageOptions, MessageRole, ModelCapability, type OAuthConfig, type Observation, type ObservationFilter, type ObservationManager, ObservationSocket, ObservationType, OllamaAdapter, type OllamaAdapterOptions, OpenAIAdapter, type OpenAIAdapterOptions, OpenRouterAdapter, type OpenRouterAdapterOptions, type OutputParser, PESAgent, type PKCEOAuthConfig, PKCEOAuthStrategy, type ParsedToolCall, type PromptBlueprint, type PromptContext, type PromptManager, type ProviderAdapter, type ProviderManagerConfig, ProviderManagerImpl, type ReasoningEngine, type RuntimeProviderConfig, type StageSpecificPrompts, StateManager, type StateSavingStrategy, type StorageAdapter, type StreamEvent, type StreamEventTypeFilter, SupabaseStorageAdapter, type SystemPromptMergeStrategy, type SystemPromptOverride, type SystemPromptResolver, type SystemPromptSpec, type SystemPromptsRegistry, type TaskDelegationConfig, TaskDelegationService, type TaskStatusResponse, type ThreadConfig, type ThreadContext, ToolRegistry, type ToolResult, type ToolSchema, type ToolSystem, TypedSocket, UISystem, UnknownProviderError, type UnsubscribeFunction, type UpdateA2ATaskRequest, VERSION, type ZyntopiaOAuthConfig, ZyntopiaOAuthStrategy, createArtInstance, generateUUID };
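
Below is a minimal consumer sketch of the thinking-token surface documented in the hunks above. The call options (`gemini.thinking.includeThoughts`, `thinkingBudget`), the `callContext` values, the `tokenType` variants, and `LLMMetadata.thinkingTokens` come from the diff itself; the `evt.data` payload field and the exact body of `METADATA` events are assumptions, since the diff does not show the full `StreamEvent` definition.

// Sketch only: routes Gemini "thinking" tokens separately from response tokens
// using the tokenType variants documented above. The evt.data field is an
// assumption; verify against the actual StreamEvent type in art-framework.
import { GeminiAdapter, type ArtStandardPrompt, type CallOptions, type StreamEvent } from 'art-framework';

async function collectSynthesis(adapter: GeminiAdapter, prompt: ArtStandardPrompt, options: CallOptions) {
  const stream = await adapter.call(prompt, {
    ...options,
    stream: true,
    callContext: 'FINAL_SYNTHESIS',
    gemini: { thinking: { includeThoughts: true, thinkingBudget: 8096 } },
  });

  let thinking = '';
  let response = '';
  for await (const evt of stream as AsyncIterable<StreamEvent & { data?: any }>) {
    if (evt.type === 'TOKEN') {
      // endsWith() keeps the check agnostic to whether tokenType is a string literal or an enum value.
      const isThinking = String((evt as any).tokenType ?? '').endsWith('LLM_THINKING');
      if (isThinking) thinking += evt.data ?? '';
      else response += evt.data ?? '';
    } else if (evt.type === 'METADATA') {
      // LLMMetadata.thinkingTokens is populated when the provider reports separate thinking usage.
      console.log('thinkingTokens:', evt.data?.thinkingTokens);
    } else if (evt.type === 'ERROR') {
      console.error('stream error:', evt.data);
    }
  }
  return { thinking, response };
}

The `8096` budget mirrors the value in the package's own @example; an appropriate budget depends on the selected model.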