@tuttiai/types 0.5.0 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.d.ts +56 -10
  2. package/package.json +1 -1
package/dist/index.d.ts CHANGED
@@ -50,15 +50,8 @@ interface TokenUsage {
50
50
  interface StreamChunk {
51
51
  type: "text" | "tool_use" | "usage";
52
52
  text?: string;
53
- tool?: {
54
- id: string;
55
- name: string;
56
- input: unknown;
57
- };
58
- usage?: {
59
- input_tokens: number;
60
- output_tokens: number;
61
- };
53
+ tool?: Omit<ToolUseBlock, "type">;
54
+ usage?: TokenUsage;
62
55
  stop_reason?: StopReason;
63
56
  }
64
57
  interface LLMProvider {
@@ -109,6 +102,38 @@ interface Voice {
109
102
  teardown?(): Promise<void>;
110
103
  }
111
104
 
105
+ /** Lifecycle hooks for agent runs, LLM calls, and tool executions. */
106
+
107
+ /** Context passed to every hook invocation. */
108
+ interface HookContext {
109
+ agent_name: string;
110
+ session_id: string;
111
+ turn: number;
112
+ metadata: Record<string, unknown>;
113
+ }
114
+ /**
115
+ * Lifecycle hooks for customizing agent behavior.
116
+ *
117
+ * Set on `ScoreConfig.hooks` (global) or `AgentConfig.hooks` (per-agent).
118
+ * Agent-level hooks merge with global hooks — both fire, agent-level first.
119
+ *
120
+ * Hook errors are caught and logged — they never crash the agent.
121
+ */
122
+ interface TuttiHooks {
123
+ /** Called before each LLM call. Return a modified request to alter it. */
124
+ beforeLLMCall?: (ctx: HookContext, request: ChatRequest) => Promise<ChatRequest>;
125
+ /** Called after each LLM response. */
126
+ afterLLMCall?: (ctx: HookContext, response: ChatResponse) => Promise<void>;
127
+ /** Called before each tool execution. Return false to block the call, or modified input. */
128
+ beforeToolCall?: (ctx: HookContext, tool: string, input: unknown) => Promise<boolean | unknown>;
129
+ /** Called after each tool execution. Return a modified result. */
130
+ afterToolCall?: (ctx: HookContext, tool: string, result: ToolResult) => Promise<ToolResult>;
131
+ /** Called when an agent run starts (before the first turn). */
132
+ beforeAgentRun?: (ctx: HookContext) => Promise<void>;
133
+ /** Called when an agent run finishes (after the last turn). */
134
+ afterAgentRun?: (ctx: HookContext, result: AgentResult) => Promise<void>;
135
+ }
136
+
112
137
  /** Agent configuration and result types. */
113
138
 
114
139
  interface BudgetConfig {
@@ -140,10 +165,14 @@ interface AgentConfig {
140
165
  semantic_memory?: AgentMemoryConfig;
141
166
  /** Enable token-by-token streaming (default: false). */
142
167
  streaming?: boolean;
168
+ /** Allow the agent to pause and ask the human for input (default: false). */
169
+ allow_human_input?: boolean;
143
170
  /** Agent IDs this agent can delegate to via the orchestrator. */
144
171
  delegates?: string[];
145
172
  /** Role in the orchestration — orchestrator receives input first. */
146
173
  role?: "orchestrator" | "specialist";
174
+ /** Agent-level lifecycle hooks — merged with global hooks from ScoreConfig. */
175
+ hooks?: TuttiHooks;
147
176
  }
148
177
  interface AgentResult {
149
178
  session_id: string;
@@ -179,6 +208,8 @@ interface ScoreConfig {
179
208
  memory?: MemoryConfig;
180
209
  /** OpenTelemetry tracing configuration. */
181
210
  telemetry?: TelemetryConfig;
211
+ /** Global lifecycle hooks — apply to all agents. */
212
+ hooks?: TuttiHooks;
182
213
  }
183
214
 
184
215
  /** Session types for conversation state management. */
@@ -268,10 +299,25 @@ type TuttiEvent = {
268
299
  type: "token:stream";
269
300
  agent_name: string;
270
301
  text: string;
302
+ } | {
303
+ type: "hitl:requested";
304
+ agent_name: string;
305
+ session_id: string;
306
+ question: string;
307
+ options?: string[];
308
+ } | {
309
+ type: "hitl:answered";
310
+ agent_name: string;
311
+ session_id: string;
312
+ answer: string;
313
+ } | {
314
+ type: "hitl:timeout";
315
+ agent_name: string;
316
+ session_id: string;
271
317
  };
272
318
  type TuttiEventType = TuttiEvent["type"];
273
319
  type TuttiEventHandler<T extends TuttiEventType = TuttiEventType> = (event: Extract<TuttiEvent, {
274
320
  type: T;
275
321
  }>) => void;
276
322
 
277
- export type { AgentConfig, AgentMemoryConfig, AgentResult, BudgetConfig, ChatMessage, ChatRequest, ChatResponse, ContentBlock, LLMProvider, MemoryConfig, Permission, ScoreConfig, Session, SessionStore, StopReason, StreamChunk, TelemetryConfig, TextBlock, TokenUsage, Tool, ToolContext, ToolDefinition, ToolMemoryHelpers, ToolResult, ToolResultBlock, ToolUseBlock, TuttiEvent, TuttiEventHandler, TuttiEventType, Voice, VoiceContext };
323
+ export type { AgentConfig, AgentMemoryConfig, AgentResult, BudgetConfig, ChatMessage, ChatRequest, ChatResponse, ContentBlock, HookContext, LLMProvider, MemoryConfig, Permission, ScoreConfig, Session, SessionStore, StopReason, StreamChunk, TelemetryConfig, TextBlock, TokenUsage, Tool, ToolContext, ToolDefinition, ToolMemoryHelpers, ToolResult, ToolResultBlock, ToolUseBlock, TuttiEvent, TuttiEventHandler, TuttiEventType, TuttiHooks, Voice, VoiceContext };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@tuttiai/types",
3
- "version": "0.5.0",
3
+ "version": "0.6.0",
4
4
  "description": "Type definitions for the Tutti multi-agent orchestration framework",
5
5
  "type": "module",
6
6
  "exports": {