@spawnco/sdk-types 0.0.48 → 0.0.50

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -236,6 +236,30 @@ interface SpawnServerSDK__V1<TConfig = any> {
          * List all document names for this variant.
          */
         list(): Promise<string[]>;
+        /**
+         * Acquire a distributed lock on a key. Blocks until lock is acquired or timeout.
+         * Always use try/finally to ensure unlock() is called.
+         *
+         * @example
+         * await sdk.documents.lock("leaderboard");
+         * try {
+         *   const data = await sdk.documents.get("leaderboard");
+         *   // ... mutate data ...
+         *   await sdk.documents.set("leaderboard", data);
+         * } finally {
+         *   await sdk.documents.unlock("leaderboard");
+         * }
+         */
+        lock(key: string, options?: {
+            /** Auto-expire if worker crashes (default: 30000ms) */
+            ttlMs?: number;
+            /** Max time to wait for lock (default: 10000ms) */
+            waitMs?: number;
+        }): Promise<void>;
+        /**
+         * Release a distributed lock. Must be called after lock() in a finally block.
+         */
+        unlock(key: string): Promise<void>;
     };
     config: {
         get(): Promise<TConfig>;
@@ -316,6 +340,48 @@ interface SpawnServerSDK__V1<TConfig = any> {
             limit?: number;
         }): Promise<Record<string, LeaderboardEntry[]>>;
     };
+    /**
+     * LLM API for conversational AI with persistent memory.
+     * Used for NPC dialog, content generation, and dynamic gameplay.
+     */
+    llm: LlmApi;
+}
+type LlmModel = 'fast' | 'smart';
+interface LlmChatOptions<T = void> {
+    /** Unique conversation ID, e.g., "npc:merlin:player:123" */
+    conversationId: string;
+    /** User's message to the AI */
+    message: string;
+    /** System prompt (only used on first message of conversation) */
+    system?: string;
+    /** JSON Schema for structured output (uses tool_use under the hood) */
+    schema?: T extends void ? never : object;
+    /** Max messages to keep in history (default: 20) */
+    maxHistory?: number;
+    /** Model selection: 'fast' (~500ms) or 'smart' (~2s) */
+    model?: LlmModel;
+}
+interface LlmChatResponse<T = void> {
+    /** Raw response text */
+    text: string;
+    /** Parsed object if schema was provided */
+    object: T extends void ? undefined : T;
+    /** Token usage for billing */
+    usage: {
+        inputTokens: number;
+        outputTokens: number;
+    };
+}
+interface LlmApi {
+    /**
+     * Conversational chat with persistent memory.
+     * Conversation state stored in variant documents automatically.
+     */
+    chat<T = void>(options: LlmChatOptions<T>): Promise<LlmChatResponse<T>>;
+    /**
+     * Clear conversation history (e.g., NPC "forgets" player)
+     */
+    clearConversation(conversationId: string): Promise<void>;
 }
 interface TokenPayload {
     sub: string;
@@ -380,6 +446,6 @@ interface LeaderboardEntry {
     timestamp: number;
 }
 type SpawnClientSDK__V0<TConfig = any> = Omit<SpawnClientSDK__V1<TConfig>, 'economy'>;
-type SpawnServerSDK__V0<TConfig = any> = Omit<SpawnServerSDK__V1<TConfig>, 'economy' | 'room'>;
+type SpawnServerSDK__V0<TConfig = any> = Omit<SpawnServerSDK__V1<TConfig>, 'economy' | 'room' | 'llm'>;
 
-export type { ClipDescriptor, ClipFormat, ClipKind, ClipListOptions, ClipLookupOptions, ClipStatus, ClipThumbnailDescriptor, InventoryItem, Item, LeaderboardEntry, Room, RoomInfo, RoomVisibility, SoundSettings, SpawnClientSDK__V0, SpawnClientSDK__V1, SpawnServerSDK__V0, SpawnServerSDK__V1, TokenPayload, UploadClipOptions, User };
+export type { ClipDescriptor, ClipFormat, ClipKind, ClipListOptions, ClipLookupOptions, ClipStatus, ClipThumbnailDescriptor, InventoryItem, Item, LeaderboardEntry, LlmApi, LlmChatOptions, LlmChatResponse, LlmModel, Room, RoomInfo, RoomVisibility, SoundSettings, SpawnClientSDK__V0, SpawnClientSDK__V1, SpawnServerSDK__V0, SpawnServerSDK__V1, TokenPayload, UploadClipOptions, User };
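The new lock()/unlock() pair above is a blocking mutual-exclusion primitive over document keys. A minimal usage sketch, assuming an initialized server SDK instance `sdk` and that documents.get()/set() round-trip plain JSON values (as the @example in the typings suggests); the "inventory" document and grantItem() helper are illustrative, not part of this diff:

async function grantItem(sdk: SpawnServerSDK__V1, userId: string): Promise<void> {
  // Wait up to 5s to acquire; auto-expire after 15s if this worker crashes.
  await sdk.documents.lock("inventory", { waitMs: 5000, ttlMs: 15000 });
  try {
    const inventory = (await sdk.documents.get("inventory")) ?? {};
    inventory[userId] = [...(inventory[userId] ?? []), "sword"];
    await sdk.documents.set("inventory", inventory);
  } finally {
    // Release in finally so a thrown error cannot leave the key locked.
    await sdk.documents.unlock("inventory");
  }
}

Passing waitMs bounds how long the caller blocks; what lock() does when the wait expires (throw vs. resolve) is not specified in these typings, so consult the runtime docs.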
package/dist/index.d.ts CHANGED
@@ -236,6 +236,30 @@ interface SpawnServerSDK__V1<TConfig = any> {
          * List all document names for this variant.
          */
         list(): Promise<string[]>;
+        /**
+         * Acquire a distributed lock on a key. Blocks until lock is acquired or timeout.
+         * Always use try/finally to ensure unlock() is called.
+         *
+         * @example
+         * await sdk.documents.lock("leaderboard");
+         * try {
+         *   const data = await sdk.documents.get("leaderboard");
+         *   // ... mutate data ...
+         *   await sdk.documents.set("leaderboard", data);
+         * } finally {
+         *   await sdk.documents.unlock("leaderboard");
+         * }
+         */
+        lock(key: string, options?: {
+            /** Auto-expire if worker crashes (default: 30000ms) */
+            ttlMs?: number;
+            /** Max time to wait for lock (default: 10000ms) */
+            waitMs?: number;
+        }): Promise<void>;
+        /**
+         * Release a distributed lock. Must be called after lock() in a finally block.
+         */
+        unlock(key: string): Promise<void>;
     };
     config: {
         get(): Promise<TConfig>;
@@ -316,6 +340,48 @@ interface SpawnServerSDK__V1<TConfig = any> {
             limit?: number;
         }): Promise<Record<string, LeaderboardEntry[]>>;
     };
+    /**
+     * LLM API for conversational AI with persistent memory.
+     * Used for NPC dialog, content generation, and dynamic gameplay.
+     */
+    llm: LlmApi;
+}
+type LlmModel = 'fast' | 'smart';
+interface LlmChatOptions<T = void> {
+    /** Unique conversation ID, e.g., "npc:merlin:player:123" */
+    conversationId: string;
+    /** User's message to the AI */
+    message: string;
+    /** System prompt (only used on first message of conversation) */
+    system?: string;
+    /** JSON Schema for structured output (uses tool_use under the hood) */
+    schema?: T extends void ? never : object;
+    /** Max messages to keep in history (default: 20) */
+    maxHistory?: number;
+    /** Model selection: 'fast' (~500ms) or 'smart' (~2s) */
+    model?: LlmModel;
+}
+interface LlmChatResponse<T = void> {
+    /** Raw response text */
+    text: string;
+    /** Parsed object if schema was provided */
+    object: T extends void ? undefined : T;
+    /** Token usage for billing */
+    usage: {
+        inputTokens: number;
+        outputTokens: number;
+    };
+}
+interface LlmApi {
+    /**
+     * Conversational chat with persistent memory.
+     * Conversation state stored in variant documents automatically.
+     */
+    chat<T = void>(options: LlmChatOptions<T>): Promise<LlmChatResponse<T>>;
+    /**
+     * Clear conversation history (e.g., NPC "forgets" player)
+     */
+    clearConversation(conversationId: string): Promise<void>;
 }
 interface TokenPayload {
     sub: string;
@@ -380,6 +446,6 @@ interface LeaderboardEntry {
     timestamp: number;
 }
 type SpawnClientSDK__V0<TConfig = any> = Omit<SpawnClientSDK__V1<TConfig>, 'economy'>;
-type SpawnServerSDK__V0<TConfig = any> = Omit<SpawnServerSDK__V1<TConfig>, 'economy' | 'room'>;
+type SpawnServerSDK__V0<TConfig = any> = Omit<SpawnServerSDK__V1<TConfig>, 'economy' | 'room' | 'llm'>;
 
-export type { ClipDescriptor, ClipFormat, ClipKind, ClipListOptions, ClipLookupOptions, ClipStatus, ClipThumbnailDescriptor, InventoryItem, Item, LeaderboardEntry, Room, RoomInfo, RoomVisibility, SoundSettings, SpawnClientSDK__V0, SpawnClientSDK__V1, SpawnServerSDK__V0, SpawnServerSDK__V1, TokenPayload, UploadClipOptions, User };
+export type { ClipDescriptor, ClipFormat, ClipKind, ClipListOptions, ClipLookupOptions, ClipStatus, ClipThumbnailDescriptor, InventoryItem, Item, LeaderboardEntry, LlmApi, LlmChatOptions, LlmChatResponse, LlmModel, Room, RoomInfo, RoomVisibility, SoundSettings, SpawnClientSDK__V0, SpawnClientSDK__V1, SpawnServerSDK__V0, SpawnServerSDK__V1, TokenPayload, UploadClipOptions, User };
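A sketch of the new llm.chat() call for NPC dialog, following the LlmChatOptions shape above; the conversation ID, prompt text, and `sdk` instance are illustrative:

const reply = await sdk.llm.chat({
  conversationId: "npc:merlin:player:123",
  message: "Do you remember me?",
  system: "You are Merlin, a cryptic but kindly wizard.", // applied only on the first message
  model: "fast", // ~500ms; use "smart" (~2s) for harder prompts
});
console.log(reply.text);
console.log(`tokens: ${reply.usage.inputTokens} in / ${reply.usage.outputTokens} out`);

// Later, make the NPC forget the player entirely:
await sdk.llm.clearConversation("npc:merlin:player:123");

Because history is persisted per conversationId in variant documents, repeated chat() calls with the same ID continue the same conversation automatically.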
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@spawnco/sdk-types",
-  "version": "0.0.48",
+  "version": "0.0.50",
   "description": "TypeScript type definitions for Spawn SDK",
   "main": "./dist/index.js",
   "module": "./dist/index.mjs",
package/src/v1.ts CHANGED
@@ -260,6 +260,35 @@ export interface SpawnServerSDK__V1<TConfig = any> {
      * List all document names for this variant.
      */
     list(): Promise<string[]>;
+
+    /**
+     * Acquire a distributed lock on a key. Blocks until lock is acquired or timeout.
+     * Always use try/finally to ensure unlock() is called.
+     *
+     * @example
+     * await sdk.documents.lock("leaderboard");
+     * try {
+     *   const data = await sdk.documents.get("leaderboard");
+     *   // ... mutate data ...
+     *   await sdk.documents.set("leaderboard", data);
+     * } finally {
+     *   await sdk.documents.unlock("leaderboard");
+     * }
+     */
+    lock(
+      key: string,
+      options?: {
+        /** Auto-expire if worker crashes (default: 30000ms) */
+        ttlMs?: number;
+        /** Max time to wait for lock (default: 10000ms) */
+        waitMs?: number;
+      }
+    ): Promise<void>;
+
+    /**
+     * Release a distributed lock. Must be called after lock() in a finally block.
+     */
+    unlock(key: string): Promise<void>;
   };
 
   config: {
@@ -379,6 +408,55 @@ export interface SpawnServerSDK__V1<TConfig = any> {
       }
     ): Promise<Record<string, LeaderboardEntry[]>>;
   };
+
+  /**
+   * LLM API for conversational AI with persistent memory.
+   * Used for NPC dialog, content generation, and dynamic gameplay.
+   */
+  llm: LlmApi;
+}
+
+// LLM Types
+export type LlmModel = 'fast' | 'smart';
+
+export interface LlmChatOptions<T = void> {
+  /** Unique conversation ID, e.g., "npc:merlin:player:123" */
+  conversationId: string;
+  /** User's message to the AI */
+  message: string;
+  /** System prompt (only used on first message of conversation) */
+  system?: string;
+  /** JSON Schema for structured output (uses tool_use under the hood) */
+  schema?: T extends void ? never : object;
+  /** Max messages to keep in history (default: 20) */
+  maxHistory?: number;
+  /** Model selection: 'fast' (~500ms) or 'smart' (~2s) */
+  model?: LlmModel;
+}
+
+export interface LlmChatResponse<T = void> {
+  /** Raw response text */
+  text: string;
+  /** Parsed object if schema was provided */
+  object: T extends void ? undefined : T;
+  /** Token usage for billing */
+  usage: {
+    inputTokens: number;
+    outputTokens: number;
+  };
+}
+
+export interface LlmApi {
+  /**
+   * Conversational chat with persistent memory.
+   * Conversation state stored in variant documents automatically.
+   */
+  chat<T = void>(options: LlmChatOptions<T>): Promise<LlmChatResponse<T>>;
+
+  /**
+   * Clear conversation history (e.g., NPC "forgets" player)
+   */
+  clearConversation(conversationId: string): Promise<void>;
 }
 
 // Token types
@@ -450,4 +528,4 @@ export interface LeaderboardEntry {
 // v0 sdk types for testing
 
 export type SpawnClientSDK__V0<TConfig = any> = Omit<SpawnClientSDK__V1<TConfig>, 'economy'>;
-export type SpawnServerSDK__V0<TConfig = any> = Omit<SpawnServerSDK__V1<TConfig>, 'economy' | 'room'>;
+export type SpawnServerSDK__V0<TConfig = any> = Omit<SpawnServerSDK__V1<TConfig>, 'economy' | 'room' | 'llm'>;
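One more sketch, showing the generic parameter and `schema` option from src/v1.ts working together for structured output (the typings note this uses tool_use under the hood). The QuestReward type and the JSON Schema body are illustrative; only the call shape comes from this diff, and whether the runtime accepts exactly these schema keywords is an assumption:

interface QuestReward {
  gold: number;
  item: string;
}

const result = await sdk.llm.chat<QuestReward>({
  conversationId: "quest:dragon:player:123",
  message: "The player slew the dragon. Decide a fair reward.",
  schema: {
    type: "object",
    properties: {
      gold: { type: "number" },
      item: { type: "string" },
    },
    required: ["gold", "item"],
  },
});

// With T supplied, result.object is typed as QuestReward rather than undefined;
// result.text still carries the raw response for logging.
console.log(result.object.gold, result.object.item);

Note the conditional types: `schema?: T extends void ? never : object` makes `schema` unpassable unless T is supplied, and `object: T extends void ? undefined : T` only yields a parsed value when a schema was provided.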