@quilltap/plugin-types 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,468 @@
1
+ /**
2
+ * Tool/Function calling types for Quilltap plugin development
3
+ *
4
+ * @module @quilltap/plugin-types/llm/tools
5
+ */
6
/**
 * OpenAI-format tool definition.
 *
 * Used as the universal baseline format for tool definitions; other
 * provider shapes in this module mirror or convert from this one.
 */
interface OpenAIToolDefinition {
  /** Tool type - always 'function' for function calling. */
  type: 'function';
  /** The function being exposed to the model. */
  function: {
    /** Name of the function. */
    name: string;
    /** Description of what the function does. */
    description?: string;
    /** Parameters schema in JSON Schema format (object root). */
    parameters?: {
      type: 'object';
      /** Per-parameter JSON Schema definitions, keyed by parameter name. */
      properties: Record<string, unknown>;
      /** Names of parameters the model must supply. */
      required?: string[];
      /** Whether keys beyond `properties` are permitted in arguments. */
      additionalProperties?: boolean;
    };
    /** Whether to use strict mode for parameters. */
    strict?: boolean;
  };
}
29
/**
 * Universal tool format, used as the standard shape across all providers.
 *
 * NOTE(review): described elsewhere as an "alias" of the OpenAI format,
 * but it is a stricter shape — `description`, `parameters`, and `required`
 * are mandatory here while they are optional in {@link OpenAIToolDefinition}.
 */
interface UniversalTool {
  /** Tool type - always 'function'. */
  type: 'function';
  /** The tool/function definition. */
  function: {
    /** Name of the tool/function. */
    name: string;
    /** Description of what the tool does. */
    description: string;
    /** Parameters schema in JSON Schema format (object root). */
    parameters: {
      type: 'object';
      /** Per-parameter JSON Schema definitions, keyed by parameter name. */
      properties: Record<string, unknown>;
      /** Names of parameters the model must supply. */
      required: string[];
    };
  };
}
49
/**
 * Anthropic-format tool definition.
 *
 * Tool-use format expected by Anthropic Claude models; the schema lives
 * under `input_schema` rather than a nested `function` object.
 */
interface AnthropicToolDefinition {
  /** Name of the tool. */
  name: string;
  /** Description of what the tool does. */
  description?: string;
  /** Input schema in JSON Schema format (object root). */
  input_schema: {
    type: 'object';
    /** Per-parameter JSON Schema definitions, keyed by parameter name. */
    properties: Record<string, unknown>;
    /** Names of parameters the model must supply. */
    required?: string[];
  };
}
65
/**
 * Google-format tool definition.
 *
 * Function-calling format expected by Google Gemini models; a flat shape
 * (no `type`/`function` wrapper) with mandatory description and required list.
 */
interface GoogleToolDefinition {
  /** Name of the function. */
  name: string;
  /** Description of what the function does. */
  description: string;
  /** Parameters schema in JSON Schema format (object root). */
  parameters: {
    type: 'object';
    /** Per-parameter JSON Schema definitions, keyed by parameter name. */
    properties: Record<string, unknown>;
    /** Names of parameters the model must supply. */
    required: string[];
  };
}
81
/**
 * Tool call from an assistant response.
 *
 * Represents a single tool invocation requested by the model; `arguments`
 * is the raw JSON string and must be parsed (see {@link ToolCallRequest}
 * for the parsed form).
 */
interface ToolCall {
  /** Unique ID for this tool call, echoed back via {@link ToolResult.toolCallId}. */
  id: string;
  /** Type of tool call - always 'function'. */
  type: 'function';
  /** The function invocation details. */
  function: {
    /** Name of the function to call. */
    name: string;
    /** Arguments as an unparsed JSON string. */
    arguments: string;
  };
}
97
/**
 * Parsed tool call request.
 *
 * Provider-neutral form used consistently across all providers after the
 * raw {@link ToolCall.function.arguments} JSON string has been parsed.
 */
interface ToolCallRequest {
  /** Name of the tool being called. */
  name: string;
  /** Arguments parsed from JSON into a plain object. */
  arguments: Record<string, unknown>;
}
107
/**
 * Tool result to send back to the model after executing a tool call.
 */
interface ToolResult {
  /** ID of the {@link ToolCall} this result responds to. */
  toolCallId: string;
  /** Result content (serialized for the model). */
  content: string;
  /** Whether this result represents an error rather than a success. */
  isError?: boolean;
}
118
/**
 * Options for tool formatting operations.
 *
 * NOTE(review): the open index signature makes every property access
 * type `unknown`-compatible; consumers should narrow values before use.
 */
interface ToolFormatOptions {
  /** Image provider type for context-aware formatting. */
  imageProviderType?: string;
  /** Additional custom, provider-specific options. */
  [key: string]: unknown;
}
127
+
128
+ /**
129
+ * Core LLM types for Quilltap plugin development
130
+ *
131
+ * @module @quilltap/plugin-types/llm
132
+ */
133
+
134
/**
 * File attachment for multimodal messages.
 *
 * Exactly one of `data` (base64) or `url` is expected to carry the file
 * content at send time — TODO confirm whether both may be set.
 */
interface FileAttachment {
  /** Unique identifier for the attachment. */
  id: string;
  /** Path to the file on disk (internal use only). */
  filepath?: string;
  /** Original filename as supplied by the user. */
  filename: string;
  /** MIME type of the file (e.g. 'image/png'). */
  mimeType: string;
  /** File size in bytes. */
  size: number;
  /** Base64-encoded file data (loaded lazily at send time). */
  data?: string;
  /** URL to fetch the file from (alternative to inline `data`). */
  url?: string;
  /** Additional free-form metadata. */
  metadata?: Record<string, unknown>;
}
155
/**
 * Message in a conversation.
 *
 * `toolCallId` applies when `role` is 'tool' (linking the result to a
 * prior call); `toolCalls` applies when `role` is 'assistant'.
 */
interface LLMMessage {
  /** Role of the message sender. */
  role: 'system' | 'user' | 'assistant' | 'tool';
  /** Message text content. */
  content: string;
  /** Optional speaker name for multi-character chats. */
  name?: string;
  /** File attachments carried by this message. */
  attachments?: FileAttachment[];
  /** ID of the tool call this message answers (tool-role messages). */
  toolCallId?: string;
  /** Tool calls made by the assistant (assistant-role messages). */
  toolCalls?: ToolCall[];
  /** Cache control marker for prompt caching (Anthropic, Google). */
  cacheControl?: {
    type: 'ephemeral';
  };
  /** Google Gemini thought signature for thinking models. */
  thoughtSignature?: string;
}
178
/**
 * JSON Schema definition for structured outputs.
 */
interface JSONSchemaDefinition {
  /** Name identifying the schema. */
  name: string;
  /** Whether to enforce strict schema adherence. */
  strict?: boolean;
  /** The JSON Schema object itself. */
  schema: Record<string, unknown>;
}
189
/**
 * Response format for structured outputs.
 */
interface ResponseFormat {
  /** Output type requested from the model. */
  type: 'text' | 'json_object' | 'json_schema';
  /** JSON Schema definition; only meaningful when `type` is 'json_schema'. */
  jsonSchema?: JSONSchemaDefinition;
}
198
/**
 * Parameters for LLM requests.
 *
 * All tuning fields are optional; providers apply their own defaults
 * when a field is omitted.
 */
interface LLMParams {
  /** Ordered messages in the conversation. */
  messages: LLMMessage[];
  /** Model identifier (provider-specific string). */
  model: string;
  /** Sampling temperature (0-2). */
  temperature?: number;
  /** Maximum tokens to generate in the completion. */
  maxTokens?: number;
  /** Nucleus (top-p) sampling parameter. */
  topP?: number;
  /** Sequence(s) at which generation stops. */
  stop?: string | string[];
  /** Tool definitions; `unknown[]` because each provider expects its own format. */
  tools?: unknown[];
  /** Tool choice: a mode keyword, or an object forcing one named function. */
  toolChoice?: 'auto' | 'none' | 'required' | {
    type: 'function';
    function: {
      name: string;
    };
  };
  /** Response format for structured outputs. */
  responseFormat?: ResponseFormat;
  /** Seed for deterministic generation. */
  seed?: number;
  /** End-user identifier for tracking/abuse monitoring. */
  user?: string;
  /** Enable the provider's native web search capability. */
  webSearchEnabled?: boolean;
  /** Provider-specific parameters forwarded from the user's profile. */
  profileParameters?: Record<string, unknown>;
}
234
/**
 * Token usage statistics for a single request.
 */
interface TokenUsage {
  /** Tokens consumed by the prompt. */
  promptTokens: number;
  /** Tokens generated in the completion. */
  completionTokens: number;
  /** Total tokens used (prompt + completion). */
  totalTokens: number;
}
245
/**
 * Prompt-cache usage statistics (OpenRouter, Anthropic).
 *
 * All fields are optional because providers report different subsets.
 */
interface CacheUsage {
  /** Number of tokens served from cache. */
  cachedTokens?: number;
  /** Cost discount attributable to caching. */
  cacheDiscount?: number;
  /** Input tokens spent creating a cache entry. */
  cacheCreationInputTokens?: number;
  /** Input tokens read back from an existing cache entry. */
  cacheReadInputTokens?: number;
}
258
/**
 * Outcome of attachment processing for a request.
 */
interface AttachmentResults {
  /** IDs of attachments that were sent successfully. */
  sent: string[];
  /** Attachments that failed, each paired with its error message. */
  failed: Array<{
    id: string;
    error: string;
  }>;
}
270
/**
 * Complete (non-streaming) response from an LLM.
 */
interface LLMResponse {
  /** Generated text content. */
  content: string;
  /** Reason generation stopped, or null when the provider reports none. */
  finishReason: string | null;
  /** Token usage statistics. */
  usage: TokenUsage;
  /** Provider-specific raw response payload. */
  raw?: unknown;
  /** Tool calls requested by the model, if any. */
  toolCalls?: ToolCall[];
  /** Results of attachment processing. */
  attachmentResults?: AttachmentResults;
  /** Google Gemini thought signature (thinking models). */
  thoughtSignature?: string;
  /** Prompt-cache usage statistics, where supported. */
  cacheUsage?: CacheUsage;
}
291
/**
 * One chunk of a streaming LLM response.
 *
 * Aggregate fields (`usage`, `toolCalls`, `attachmentResults`) typically
 * arrive only on the final chunk, where `done` is true.
 */
interface StreamChunk {
  /** Text content delta carried by this chunk. */
  content: string;
  /** Whether this is the final chunk of the stream. */
  done: boolean;
  /** Token usage (typically on the final chunk). */
  usage?: TokenUsage;
  /** Tool calls (typically on the final chunk). */
  toolCalls?: ToolCall[];
  /** Attachment results (typically on the final chunk). */
  attachmentResults?: AttachmentResults;
  /** Raw provider response, kept for tool-call detection. */
  rawResponse?: unknown;
  /** Google Gemini thought signature (thinking models). */
  thoughtSignature?: string;
  /** Prompt-cache usage statistics, where supported. */
  cacheUsage?: CacheUsage;
}
312
/**
 * Image generation parameters.
 *
 * Only `prompt` is required; which other fields are honored depends on
 * the provider/model (e.g. `guidanceScale`/`steps` for diffusion models).
 */
interface ImageGenParams {
  /** Image generation prompt. */
  prompt: string;
  /** Negative prompt describing what to avoid. */
  negativePrompt?: string;
  /** Model identifier. */
  model?: string;
  /** Image size as 'WIDTHxHEIGHT' (e.g. '1024x1024'). */
  size?: string;
  /** Aspect ratio (e.g. '16:9'); alternative to `size`. */
  aspectRatio?: string;
  /** Image quality tier. */
  quality?: 'standard' | 'hd';
  /** Image style preset. */
  style?: 'vivid' | 'natural';
  /** Number of images to generate. */
  n?: number;
  /** How the image payload is returned: a URL or inline base64 JSON. */
  responseFormat?: 'url' | 'b64_json';
  /** Seed for reproducible generation. */
  seed?: number;
  /** Guidance scale for diffusion models. */
  guidanceScale?: number;
  /** Number of inference steps for diffusion models. */
  steps?: number;
}
341
/**
 * A single generated image result.
 */
interface GeneratedImage {
  /** Base64-encoded image data. */
  data?: string;
  /** URL of the generated image (alternative to inline `data`). */
  url?: string;
  /**
   * Base64-encoded image data.
   * @deprecated Use `data` instead.
   */
  b64Json?: string;
  /** MIME type of the image payload. */
  mimeType?: string;
  /** Revised prompt — some providers rewrite the prompt before generating. */
  revisedPrompt?: string;
  /** Seed actually used for generation. */
  seed?: number;
}
358
/**
 * Image generation response.
 */
interface ImageGenResponse {
  /** Generated images (length corresponds to the requested count). */
  images: GeneratedImage[];
  /** Provider-specific raw response payload. */
  raw?: unknown;
}
367
/**
 * Severity level of a model warning, from informational to blocking.
 */
type ModelWarningLevel = 'info' | 'warning' | 'error';
371
/**
 * A warning or recommendation attached to a model.
 */
interface ModelWarning {
  /** Warning severity level. */
  level: ModelWarningLevel;
  /** Human-readable warning message. */
  message: string;
  /** Optional link to relevant documentation. */
  documentationUrl?: string;
}
382
/**
 * Model metadata, including warnings, lifecycle flags, and capacity limits.
 */
interface ModelMetadata {
  /** Model identifier. */
  id: string;
  /** Human-readable display name. */
  displayName?: string;
  /** Warnings or recommendations for this model. */
  warnings?: ModelWarning[];
  /** Whether the model is deprecated. */
  deprecated?: boolean;
  /** Whether the model is experimental or in preview. */
  experimental?: boolean;
  /** Names of capabilities this model lacks. */
  missingCapabilities?: string[];
  /** Maximum number of output tokens per completion. */
  maxOutputTokens?: number;
  /** Context window size in tokens. */
  contextWindow?: number;
}
403
/**
 * Core LLM provider interface.
 *
 * Plugins implement this interface to provide LLM functionality. The four
 * readonly capability flags let callers feature-detect before invoking the
 * optional methods.
 */
interface LLMProvider {
  /** Whether this provider supports file attachments. */
  readonly supportsFileAttachments: boolean;
  /** MIME types accepted for file attachments. */
  readonly supportedMimeTypes: string[];
  /** Whether this provider supports image generation. */
  readonly supportsImageGeneration: boolean;
  /** Whether this provider supports native web search. */
  readonly supportsWebSearch: boolean;
  /**
   * Send a message and resolve with the complete response.
   *
   * @param params - Request parameters (messages, model, tuning options).
   * @param apiKey - Credential used to authenticate with the provider.
   */
  sendMessage(params: LLMParams, apiKey: string): Promise<LLMResponse>;
  /**
   * Send a message and stream the response chunk by chunk.
   *
   * @param params - Request parameters (messages, model, tuning options).
   * @param apiKey - Credential used to authenticate with the provider.
   */
  streamMessage(params: LLMParams, apiKey: string): AsyncGenerator<StreamChunk>;
  /**
   * Validate an API key, resolving true when it is usable.
   */
  validateApiKey(apiKey: string): Promise<boolean>;
  /**
   * List the model identifiers available from the provider.
   */
  getAvailableModels(apiKey: string): Promise<string[]>;
  /**
   * Generate an image. Optional; check {@link supportsImageGeneration} first.
   */
  generateImage?(params: ImageGenParams, apiKey: string): Promise<ImageGenResponse>;
  /**
   * Get metadata for a specific model; undefined when unknown. Optional.
   */
  getModelMetadata?(modelId: string): ModelMetadata | undefined;
  /**
   * Get metadata (including warnings) for all models. Optional.
   */
  getModelsWithMetadata?(apiKey: string): Promise<ModelMetadata[]>;
}
446
/**
 * Image generation provider interface.
 *
 * A standalone counterpart to {@link LLMProvider.generateImage} for
 * image-only providers.
 */
interface ImageGenProvider {
  /** Provider identifier. */
  readonly provider: string;
  /** Model identifiers supported by this provider. */
  readonly supportedModels: string[];
  /**
   * Generate an image.
   *
   * @param params - Generation parameters (prompt, size, etc.).
   * @param apiKey - Credential used to authenticate with the provider.
   */
  generateImage(params: ImageGenParams, apiKey: string): Promise<ImageGenResponse>;
  /**
   * Validate an API key, resolving true when it is usable.
   */
  validateApiKey(apiKey: string): Promise<boolean>;
  /**
   * List available model identifiers; the key is optional here,
   * unlike {@link LLMProvider.getAvailableModels}.
   */
  getAvailableModels(apiKey?: string): Promise<string[]>;
}
467
+
468
/** Public type-only exports for the plugin-types LLM module. */
export type { AnthropicToolDefinition, AttachmentResults, CacheUsage, FileAttachment, GeneratedImage, GoogleToolDefinition, ImageGenParams, ImageGenProvider, ImageGenResponse, JSONSchemaDefinition, LLMMessage, LLMParams, LLMProvider, LLMResponse, ModelMetadata, ModelWarning, ModelWarningLevel, OpenAIToolDefinition, ResponseFormat, StreamChunk, TokenUsage, ToolCall, ToolCallRequest, ToolFormatOptions, ToolResult, UniversalTool };