@tigmart/ai-types 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,237 @@
1
+ /**
2
+ * Arbitrary key-value metadata that can be attached to any domain entity.
3
+ * Values are intentionally restricted to JSON-safe scalars so storage
4
+ * adapters can serialize without additional logic.
5
+ */
6
+ type Metadata = Record<string, string | number | boolean | null>;
7
+
8
+ /** The author role of a message in a conversation. */
9
+ type MessageRole = 'user' | 'assistant' | 'system' | 'tool';
10
+ /**
11
+ * Lifecycle status of a message.
12
+ *
13
+ * - pending — created locally, not yet sent or acknowledged
14
+ * - streaming — assistant is actively writing (partial content)
15
+ * - done — content is complete and stable
16
+ * - error — delivery or generation failed; see `Message.error`
17
+ */
18
+ type MessageStatus = 'pending' | 'streaming' | 'done' | 'error';
19
+ interface TextPart {
20
+ type: 'text';
21
+ text: string;
22
+ }
23
+ interface ImagePart {
24
+ type: 'image';
25
+ /** Publicly accessible URL or a data: URI for inline images. */
26
+ url: string;
27
+ mimeType: string;
28
+ altText?: string;
29
+ }
30
+ interface FilePart {
31
+ type: 'file';
32
+ /** Publicly accessible URL to the uploaded file. */
33
+ url: string;
34
+ name: string;
35
+ mimeType: string;
36
+ /** File size in bytes. */
37
+ size: number;
38
+ }
39
+ /** A single piece of content within a message. */
40
+ type MessagePart = TextPart | ImagePart | FilePart;
41
+ interface Message {
42
+ id: string;
43
+ conversationId: string;
44
+ role: MessageRole;
45
+ /**
46
+ * Ordered list of content parts that make up this message.
47
+ * Most messages will have a single TextPart. Multi-modal messages
48
+ * may have an ImagePart or FilePart alongside text.
49
+ */
50
+ parts: MessagePart[];
51
+ status: MessageStatus;
52
+ /** Human-readable error description when status === 'error'. */
53
+ error?: string;
54
+ /** The model ID used to generate this message (assistant messages only). */
55
+ model?: string;
56
+ /** The provider ID used to generate this message (assistant messages only). */
57
+ providerId?: string;
58
+ /** Unix timestamp (ms) when the message was created. */
59
+ createdAt: number;
60
+ /** Unix timestamp (ms) when the message was last updated. */
61
+ updatedAt: number;
62
+ metadata: Metadata;
63
+ }
64
+
65
+ /**
66
+ * A conversation is a single chat thread.
67
+ * It belongs to an optional project and always has a provider + model assigned.
68
+ */
69
+ interface Conversation {
70
+ id: string;
71
+ /** Null means the conversation is not grouped under any project. */
72
+ projectId: string | null;
73
+ title: string;
74
+ /**
75
+ * Optional system prompt for this specific conversation.
76
+ * If the conversation belongs to a project, this overrides the
77
+ * project's defaultSystemPrompt when set.
78
+ */
79
+ systemPrompt?: string;
80
+ /** ID of the provider selected for this conversation. */
81
+ providerId: string;
82
+ /** ID of the model (scoped to the provider) selected for this conversation. */
83
+ modelId: string;
84
+ /** Unix timestamp (ms) when the conversation was created. */
85
+ createdAt: number;
86
+ /** Unix timestamp (ms) when the conversation was last updated. */
87
+ updatedAt: number;
88
+ metadata: Metadata;
89
+ }
90
+
91
+ /**
92
+ * A project groups one or more conversations under a shared context.
93
+ * Defaults set here are applied when creating a new conversation
94
+ * inside the project, but each conversation can override them.
95
+ */
96
+ interface Project {
97
+ id: string;
98
+ name: string;
99
+ description?: string;
100
+ /** Default provider ID applied to new conversations in this project. */
101
+ defaultProviderId?: string;
102
+ /** Default model ID applied to new conversations in this project. */
103
+ defaultModelId?: string;
104
+ /** Default system prompt applied to new conversations in this project. */
105
+ defaultSystemPrompt?: string;
106
+ /** Unix timestamp (ms) when the project was created. */
107
+ createdAt: number;
108
+ /** Unix timestamp (ms) when the project was last updated. */
109
+ updatedAt: number;
110
+ metadata: Metadata;
111
+ }
112
+
113
+ /**
114
+ * Describes what a specific model is capable of.
115
+ * Used by the UI to conditionally render controls (e.g. attach button)
116
+ * and by adapters to validate requests before sending.
117
+ */
118
+ interface ModelCapabilities {
119
+ streaming: boolean;
120
+ /** Accepts image parts in messages. */
121
+ images: boolean;
122
+ /** Supports tool/function calling. */
123
+ tools: boolean;
124
+ }
125
+ /** A model offered by a provider. */
126
+ interface Model {
127
+ id: string;
128
+ name: string;
129
+ /** Maximum combined input + output tokens. */
130
+ contextWindow: number;
131
+ capabilities: ModelCapabilities;
132
+ }
133
+ /**
134
+ * Static configuration describing a provider and its available models.
135
+ * This is the registration shape — not the runtime adapter.
136
+ */
137
+ interface ProviderConfig {
138
+ id: string;
139
+ name: string;
140
+ models: Model[];
141
+ }
142
+
143
+ /**
144
+ * Token consumption reported by the provider at the end of a stream.
145
+ * Note: all fields here are declared required even though not every provider
+ * returns a full breakdown — adapters must compute or default each value.
146
+ */
147
+ interface TokenUsage {
148
+ promptTokens: number;
149
+ completionTokens: number;
150
+ totalTokens: number;
151
+ }
152
+ /**
153
+ * A single chunk emitted by a provider adapter during streaming.
154
+ * Discriminated by `type`.
155
+ *
156
+ * - delta — partial text content to append to the current message
157
+ * - done — stream has ended cleanly; optional token usage provided
158
+ * - error — stream ended with an error from the provider
159
+ */
160
+ type StreamChunk = {
161
+ type: 'delta';
162
+ content: string;
163
+ } | {
164
+ type: 'done';
165
+ usage?: TokenUsage;
166
+ } | {
167
+ type: 'error';
168
+ error: string;
169
+ };
170
+ /** Options forwarded to the provider when starting a stream. */
171
+ interface StreamOptions {
172
+ /** AbortSignal to cancel an in-progress stream. */
173
+ signal?: AbortSignal;
174
+ maxTokens?: number;
175
+ temperature?: number;
176
+ }
177
+
178
+ /**
179
+ * Pluggable storage contract for all platform entities.
180
+ *
181
+ * Implementations must handle their own serialization.
182
+ * All operations are async to support both synchronous backends
183
+ * (e.g. localStorage wrapped in Promises) and remote backends.
184
+ *
185
+ * V1 ships with a LocalStorageAdapter.
186
+ * Later: IndexedDBAdapter, RESTAdapter.
187
+ */
188
+ interface ChatStorage {
189
+ getProjects(): Promise<Project[]>;
190
+ saveProject(project: Project): Promise<void>;
191
+ deleteProject(id: string): Promise<void>;
192
+ /**
193
+ * Returns all conversations, optionally filtered by project.
194
+ * Pass `null` to get conversations that are not in any project.
195
+ * Omit the argument to get all conversations regardless of project.
196
+ */
197
+ getConversations(projectId?: string | null): Promise<Conversation[]>;
198
+ saveConversation(conversation: Conversation): Promise<void>;
199
+ deleteConversation(id: string): Promise<void>;
200
+ /** Returns all messages for a conversation in insertion order. */
201
+ getMessages(conversationId: string): Promise<Message[]>;
202
+ /** Appends a new message. Throws if the message ID already exists. */
203
+ appendMessage(conversationId: string, message: Message): Promise<void>;
204
+ /** Replaces an existing message in full (used to apply streaming deltas). */
205
+ updateMessage(conversationId: string, message: Message): Promise<void>;
206
+ deleteMessage(conversationId: string, messageId: string): Promise<void>;
207
+ }
208
+
209
+ /**
210
+ * The contract every LLM provider adapter must implement.
211
+ * Implementations live in @tigmart/ai-core (server-side only).
212
+ *
213
+ * The adapter receives normalized Message objects and returns an
214
+ * AsyncIterable of StreamChunks. Mapping to/from provider-specific
215
+ * wire formats is the adapter's responsibility.
216
+ */
217
+ interface ProviderAdapter {
218
+ /** Unique identifier for the provider (e.g. 'openai', 'anthropic'). */
219
+ readonly id: string;
220
+ /** Human-readable display name (e.g. 'OpenAI'). */
221
+ readonly name: string;
222
+ /** All models this provider exposes. */
223
+ readonly models: Model[];
224
+ /**
225
+ * Start a streaming completion.
226
+ *
227
+ * @param messages Full conversation history in normalized format.
228
+ * @param modelId One of the model IDs from `this.models`.
229
+ * @param options Optional per-request controls.
230
+ * @returns AsyncIterable of StreamChunks. Callers must consume
231
+ * the iterable to completion or call signal.abort()
232
+ * to cancel early.
233
+ */
234
+ stream(messages: Message[], modelId: string, options?: StreamOptions): AsyncIterable<StreamChunk>;
235
+ }
236
+
237
+ export type { ChatStorage, Conversation, FilePart, ImagePart, Message, MessagePart, MessageRole, MessageStatus, Metadata, Model, ModelCapabilities, Project, ProviderAdapter, ProviderConfig, StreamChunk, StreamOptions, TextPart, TokenUsage };
@@ -0,0 +1,237 @@
1
+ /**
2
+ * Arbitrary key-value metadata that can be attached to any domain entity.
3
+ * Values are intentionally restricted to JSON-safe scalars so storage
4
+ * adapters can serialize without additional logic.
5
+ */
6
+ type Metadata = Record<string, string | number | boolean | null>;
7
+
8
+ /** The author role of a message in a conversation. */
9
+ type MessageRole = 'user' | 'assistant' | 'system' | 'tool';
10
+ /**
11
+ * Lifecycle status of a message.
12
+ *
13
+ * - pending — created locally, not yet sent or acknowledged
14
+ * - streaming — assistant is actively writing (partial content)
15
+ * - done — content is complete and stable
16
+ * - error — delivery or generation failed; see `Message.error`
17
+ */
18
+ type MessageStatus = 'pending' | 'streaming' | 'done' | 'error';
19
+ interface TextPart {
20
+ type: 'text';
21
+ text: string;
22
+ }
23
+ interface ImagePart {
24
+ type: 'image';
25
+ /** Publicly accessible URL or a data: URI for inline images. */
26
+ url: string;
27
+ mimeType: string;
28
+ altText?: string;
29
+ }
30
+ interface FilePart {
31
+ type: 'file';
32
+ /** Publicly accessible URL to the uploaded file. */
33
+ url: string;
34
+ name: string;
35
+ mimeType: string;
36
+ /** File size in bytes. */
37
+ size: number;
38
+ }
39
+ /** A single piece of content within a message. */
40
+ type MessagePart = TextPart | ImagePart | FilePart;
41
+ interface Message {
42
+ id: string;
43
+ conversationId: string;
44
+ role: MessageRole;
45
+ /**
46
+ * Ordered list of content parts that make up this message.
47
+ * Most messages will have a single TextPart. Multi-modal messages
48
+ * may have an ImagePart or FilePart alongside text.
49
+ */
50
+ parts: MessagePart[];
51
+ status: MessageStatus;
52
+ /** Human-readable error description when status === 'error'. */
53
+ error?: string;
54
+ /** The model ID used to generate this message (assistant messages only). */
55
+ model?: string;
56
+ /** The provider ID used to generate this message (assistant messages only). */
57
+ providerId?: string;
58
+ /** Unix timestamp (ms) when the message was created. */
59
+ createdAt: number;
60
+ /** Unix timestamp (ms) when the message was last updated. */
61
+ updatedAt: number;
62
+ metadata: Metadata;
63
+ }
64
+
65
+ /**
66
+ * A conversation is a single chat thread.
67
+ * It belongs to an optional project and always has a provider + model assigned.
68
+ */
69
+ interface Conversation {
70
+ id: string;
71
+ /** Null means the conversation is not grouped under any project. */
72
+ projectId: string | null;
73
+ title: string;
74
+ /**
75
+ * Optional system prompt for this specific conversation.
76
+ * If the conversation belongs to a project, this overrides the
77
+ * project's defaultSystemPrompt when set.
78
+ */
79
+ systemPrompt?: string;
80
+ /** ID of the provider selected for this conversation. */
81
+ providerId: string;
82
+ /** ID of the model (scoped to the provider) selected for this conversation. */
83
+ modelId: string;
84
+ /** Unix timestamp (ms) when the conversation was created. */
85
+ createdAt: number;
86
+ /** Unix timestamp (ms) when the conversation was last updated. */
87
+ updatedAt: number;
88
+ metadata: Metadata;
89
+ }
90
+
91
+ /**
92
+ * A project groups one or more conversations under a shared context.
93
+ * Defaults set here are applied when creating a new conversation
94
+ * inside the project, but each conversation can override them.
95
+ */
96
+ interface Project {
97
+ id: string;
98
+ name: string;
99
+ description?: string;
100
+ /** Default provider ID applied to new conversations in this project. */
101
+ defaultProviderId?: string;
102
+ /** Default model ID applied to new conversations in this project. */
103
+ defaultModelId?: string;
104
+ /** Default system prompt applied to new conversations in this project. */
105
+ defaultSystemPrompt?: string;
106
+ /** Unix timestamp (ms) when the project was created. */
107
+ createdAt: number;
108
+ /** Unix timestamp (ms) when the project was last updated. */
109
+ updatedAt: number;
110
+ metadata: Metadata;
111
+ }
112
+
113
+ /**
114
+ * Describes what a specific model is capable of.
115
+ * Used by the UI to conditionally render controls (e.g. attach button)
116
+ * and by adapters to validate requests before sending.
117
+ */
118
+ interface ModelCapabilities {
119
+ streaming: boolean;
120
+ /** Accepts image parts in messages. */
121
+ images: boolean;
122
+ /** Supports tool/function calling. */
123
+ tools: boolean;
124
+ }
125
+ /** A model offered by a provider. */
126
+ interface Model {
127
+ id: string;
128
+ name: string;
129
+ /** Maximum combined input + output tokens. */
130
+ contextWindow: number;
131
+ capabilities: ModelCapabilities;
132
+ }
133
+ /**
134
+ * Static configuration describing a provider and its available models.
135
+ * This is the registration shape — not the runtime adapter.
136
+ */
137
+ interface ProviderConfig {
138
+ id: string;
139
+ name: string;
140
+ models: Model[];
141
+ }
142
+
143
+ /**
144
+ * Token consumption reported by the provider at the end of a stream.
145
+ * Fields are optional because not all providers return full breakdowns.
146
+ */
147
+ interface TokenUsage {
148
+ promptTokens: number;
149
+ completionTokens: number;
150
+ totalTokens: number;
151
+ }
152
+ /**
153
+ * A single chunk emitted by a provider adapter during streaming.
154
+ * Discriminated by `type`.
155
+ *
156
+ * - delta — partial text content to append to the current message
157
+ * - done — stream has ended cleanly; optional token usage provided
158
+ * - error — stream ended with an error from the provider
159
+ */
160
+ type StreamChunk = {
161
+ type: 'delta';
162
+ content: string;
163
+ } | {
164
+ type: 'done';
165
+ usage?: TokenUsage;
166
+ } | {
167
+ type: 'error';
168
+ error: string;
169
+ };
170
+ /** Options forwarded to the provider when starting a stream. */
171
+ interface StreamOptions {
172
+ /** AbortSignal to cancel an in-progress stream. */
173
+ signal?: AbortSignal;
174
+ maxTokens?: number;
175
+ temperature?: number;
176
+ }
177
+
178
+ /**
179
+ * Pluggable storage contract for all platform entities.
180
+ *
181
+ * Implementations must handle their own serialization.
182
+ * All operations are async to support both synchronous backends
183
+ * (e.g. localStorage wrapped in Promises) and remote backends.
184
+ *
185
+ * V1 ships with a LocalStorageAdapter.
186
+ * Later: IndexedDBAdapter, RESTAdapter.
187
+ */
188
+ interface ChatStorage {
189
+ getProjects(): Promise<Project[]>;
190
+ saveProject(project: Project): Promise<void>;
191
+ deleteProject(id: string): Promise<void>;
192
+ /**
193
+ * Returns all conversations, optionally filtered by project.
194
+ * Pass `null` to get conversations that are not in any project.
195
+ * Omit the argument to get all conversations regardless of project.
196
+ */
197
+ getConversations(projectId?: string | null): Promise<Conversation[]>;
198
+ saveConversation(conversation: Conversation): Promise<void>;
199
+ deleteConversation(id: string): Promise<void>;
200
+ /** Returns all messages for a conversation in insertion order. */
201
+ getMessages(conversationId: string): Promise<Message[]>;
202
+ /** Appends a new message. Throws if the message ID already exists. */
203
+ appendMessage(conversationId: string, message: Message): Promise<void>;
204
+ /** Replaces an existing message in full (used to apply streaming deltas). */
205
+ updateMessage(conversationId: string, message: Message): Promise<void>;
206
+ deleteMessage(conversationId: string, messageId: string): Promise<void>;
207
+ }
208
+
209
+ /**
210
+ * The contract every LLM provider adapter must implement.
211
+ * Implementations live in @tigmart/ai-core (server-side only).
212
+ *
213
+ * The adapter receives normalized Message objects and returns an
214
+ * AsyncIterable of StreamChunks. Mapping to/from provider-specific
215
+ * wire formats is the adapter's responsibility.
216
+ */
217
+ interface ProviderAdapter {
218
+ /** Unique identifier for the provider (e.g. 'openai', 'anthropic'). */
219
+ readonly id: string;
220
+ /** Human-readable display name (e.g. 'OpenAI'). */
221
+ readonly name: string;
222
+ /** All models this provider exposes. */
223
+ readonly models: Model[];
224
+ /**
225
+ * Start a streaming completion.
226
+ *
227
+ * @param messages Full conversation history in normalized format.
228
+ * @param modelId One of the model IDs from `this.models`.
229
+ * @param options Optional per-request controls.
230
+ * @returns AsyncIterable of StreamChunks. Callers must consume
231
+ * the iterable to completion or call signal.abort()
232
+ * to cancel early.
233
+ */
234
+ stream(messages: Message[], modelId: string, options?: StreamOptions): AsyncIterable<StreamChunk>;
235
+ }
236
+
237
+ export type { ChatStorage, Conversation, FilePart, ImagePart, Message, MessagePart, MessageRole, MessageStatus, Metadata, Model, ModelCapabilities, Project, ProviderAdapter, ProviderConfig, StreamChunk, StreamOptions, TextPart, TokenUsage };
package/dist/index.js ADDED
@@ -0,0 +1,18 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __copyProps = (to, from, except, desc) => {
7
+ if (from && typeof from === "object" || typeof from === "function") {
8
+ for (let key of __getOwnPropNames(from))
9
+ if (!__hasOwnProp.call(to, key) && key !== except)
10
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
11
+ }
12
+ return to;
13
+ };
14
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
15
+
16
+ // src/index.ts
17
+ var index_exports = {};
18
+ module.exports = __toCommonJS(index_exports);
package/dist/index.mjs ADDED
File without changes
package/package.json ADDED
@@ -0,0 +1,29 @@
1
+ {
2
+ "name": "@tigmart/ai-types",
3
+ "version": "0.0.1",
4
+ "description": "Shared TypeScript types for the AI chat platform",
5
+ "main": "./dist/index.js",
6
+ "module": "./dist/index.mjs",
7
+ "types": "./dist/index.d.ts",
8
+ "exports": {
9
+ ".": {
10
+ "types": "./dist/index.d.ts",
11
+ "import": "./dist/index.mjs",
12
+ "require": "./dist/index.js"
13
+ }
14
+ },
15
+ "files": ["dist"],
16
+ "license": "MIT",
17
+ "publishConfig": {
18
+ "access": "public"
19
+ },
20
+ "scripts": {
21
+ "build": "tsup",
22
+ "dev": "tsup --watch",
23
+ "typecheck": "tsc --noEmit"
24
+ },
25
+ "devDependencies": {
26
+ "tsup": "^8.3.0",
27
+ "typescript": "^5.7.0"
28
+ }
29
+ }