@soederpop/luca 0.0.28 → 0.0.29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,7 @@
1
1
  import { setBuildTimeData, setContainerBuildTimeData } from './index.js';
2
2
 
3
3
  // Auto-generated introspection registry data
4
- // Generated at: 2026-03-23T07:45:56.976Z
4
+ // Generated at: 2026-03-24T01:41:39.147Z
5
5
 
6
6
  setBuildTimeData('features.googleDocs', {
7
7
  "id": "features.googleDocs",
@@ -5773,6 +5773,32 @@ setBuildTimeData('features.fs', {
5773
5773
  }
5774
5774
  ]
5775
5775
  },
5776
+ "isSymlink": {
5777
+ "description": "Checks if a path is a symbolic link.",
5778
+ "parameters": {
5779
+ "path": {
5780
+ "type": "string",
5781
+ "description": "The path to check"
5782
+ }
5783
+ },
5784
+ "required": [
5785
+ "path"
5786
+ ],
5787
+ "returns": "boolean"
5788
+ },
5789
+ "realpath": {
5790
+ "description": "Resolves a symlink to its real path. Returns the resolved path as-is if not a symlink.",
5791
+ "parameters": {
5792
+ "path": {
5793
+ "type": "string",
5794
+ "description": "The path to resolve"
5795
+ }
5796
+ },
5797
+ "required": [
5798
+ "path"
5799
+ ],
5800
+ "returns": "string"
5801
+ },
5776
5802
  "stat": {
5777
5803
  "description": "Synchronously returns the stat object for a file or directory.",
5778
5804
  "parameters": {
@@ -10355,208 +10381,6 @@ setBuildTimeData('clients.openai', {
10355
10381
  ]
10356
10382
  });
10357
10383
 
10358
- setBuildTimeData('clients.elevenlabs', {
10359
- "id": "clients.elevenlabs",
10360
- "description": "ElevenLabs client — text-to-speech synthesis via the ElevenLabs REST API. Provides methods for listing voices, listing models, and generating speech audio. Audio is returned as a Buffer; use `say()` for a convenience method that writes to disk.",
10361
- "shortcut": "clients.elevenlabs",
10362
- "className": "ElevenLabsClient",
10363
- "methods": {
10364
- "beforeRequest": {
10365
- "description": "Inject the xi-api-key header before each request.",
10366
- "parameters": {},
10367
- "required": [],
10368
- "returns": "void"
10369
- },
10370
- "connect": {
10371
- "description": "Validate the API key by listing available models.",
10372
- "parameters": {},
10373
- "required": [],
10374
- "returns": "Promise<this>",
10375
- "examples": [
10376
- {
10377
- "language": "ts",
10378
- "code": "await el.connect()"
10379
- }
10380
- ]
10381
- },
10382
- "listVoices": {
10383
- "description": "List available voices with optional search and filtering.",
10384
- "parameters": {
10385
- "options": {
10386
- "type": "{\n search?: string\n category?: string\n voice_type?: string\n page_size?: number\n next_page_token?: string\n }",
10387
- "description": "Query parameters for filtering voices"
10388
- }
10389
- },
10390
- "required": [],
10391
- "returns": "Promise<any>",
10392
- "examples": [
10393
- {
10394
- "language": "ts",
10395
- "code": "const voices = await el.listVoices()\nconst premade = await el.listVoices({ category: 'premade' })"
10396
- }
10397
- ]
10398
- },
10399
- "getVoice": {
10400
- "description": "Get details for a single voice.",
10401
- "parameters": {
10402
- "voiceId": {
10403
- "type": "string",
10404
- "description": "The voice ID to look up"
10405
- }
10406
- },
10407
- "required": [
10408
- "voiceId"
10409
- ],
10410
- "returns": "Promise<any>",
10411
- "examples": [
10412
- {
10413
- "language": "ts",
10414
- "code": "const voice = await el.getVoice('21m00Tcm4TlvDq8ikWAM')\nconsole.log(voice.name, voice.settings)"
10415
- }
10416
- ]
10417
- },
10418
- "listModels": {
10419
- "description": "List available TTS models.",
10420
- "parameters": {},
10421
- "required": [],
10422
- "returns": "Promise<any[]>",
10423
- "examples": [
10424
- {
10425
- "language": "ts",
10426
- "code": "const models = await el.listModels()\nconsole.log(models.map(m => m.model_id))"
10427
- }
10428
- ]
10429
- },
10430
- "synthesize": {
10431
- "description": "Synthesize speech from text, returning audio as a Buffer.",
10432
- "parameters": {
10433
- "text": {
10434
- "type": "string",
10435
- "description": "The text to convert to speech"
10436
- },
10437
- "options": {
10438
- "type": "SynthesizeOptions",
10439
- "description": "Voice, model, format, and voice settings overrides",
10440
- "properties": {
10441
- "voiceId": {
10442
- "type": "string",
10443
- "description": ""
10444
- },
10445
- "modelId": {
10446
- "type": "string",
10447
- "description": ""
10448
- },
10449
- "outputFormat": {
10450
- "type": "string",
10451
- "description": ""
10452
- },
10453
- "voiceSettings": {
10454
- "type": "ElevenLabsVoiceSettings",
10455
- "description": ""
10456
- },
10457
- "disableCache": {
10458
- "type": "boolean",
10459
- "description": ""
10460
- }
10461
- }
10462
- }
10463
- },
10464
- "required": [
10465
- "text"
10466
- ],
10467
- "returns": "Promise<Buffer>",
10468
- "examples": [
10469
- {
10470
- "language": "ts",
10471
- "code": "const audio = await el.synthesize('Hello world')\n// audio is a Buffer of mp3 data\n\nconst custom = await el.synthesize('Hello', {\n voiceId: '21m00Tcm4TlvDq8ikWAM',\n voiceSettings: { stability: 0.5, similarityBoost: 0.8 }\n})"
10472
- }
10473
- ]
10474
- },
10475
- "say": {
10476
- "description": "Synthesize speech and write the audio to a file.",
10477
- "parameters": {
10478
- "text": {
10479
- "type": "string",
10480
- "description": "The text to convert to speech"
10481
- },
10482
- "outputPath": {
10483
- "type": "string",
10484
- "description": "File path to write the audio to"
10485
- },
10486
- "options": {
10487
- "type": "SynthesizeOptions",
10488
- "description": "Voice, model, format, and voice settings overrides",
10489
- "properties": {
10490
- "voiceId": {
10491
- "type": "string",
10492
- "description": ""
10493
- },
10494
- "modelId": {
10495
- "type": "string",
10496
- "description": ""
10497
- },
10498
- "outputFormat": {
10499
- "type": "string",
10500
- "description": ""
10501
- },
10502
- "voiceSettings": {
10503
- "type": "ElevenLabsVoiceSettings",
10504
- "description": ""
10505
- },
10506
- "disableCache": {
10507
- "type": "boolean",
10508
- "description": ""
10509
- }
10510
- }
10511
- }
10512
- },
10513
- "required": [
10514
- "text",
10515
- "outputPath"
10516
- ],
10517
- "returns": "Promise<string>",
10518
- "examples": [
10519
- {
10520
- "language": "ts",
10521
- "code": "const path = await el.say('Hello world', './hello.mp3')\nconsole.log(`Audio saved to ${path}`)"
10522
- }
10523
- ]
10524
- }
10525
- },
10526
- "getters": {
10527
- "apiKey": {
10528
- "description": "The resolved API key from options or environment.",
10529
- "returns": "string"
10530
- }
10531
- },
10532
- "events": {
10533
- "failure": {
10534
- "name": "failure",
10535
- "description": "Event emitted by ElevenLabsClient",
10536
- "arguments": {}
10537
- },
10538
- "voices": {
10539
- "name": "voices",
10540
- "description": "Event emitted by ElevenLabsClient",
10541
- "arguments": {}
10542
- },
10543
- "speech": {
10544
- "name": "speech",
10545
- "description": "Event emitted by ElevenLabsClient",
10546
- "arguments": {}
10547
- }
10548
- },
10549
- "state": {},
10550
- "options": {},
10551
- "envVars": [],
10552
- "examples": [
10553
- {
10554
- "language": "ts",
10555
- "code": "const el = container.client('elevenlabs')\nawait el.connect()\nconst voices = await el.listVoices()\nconst audio = await el.synthesize('Hello world')\n// audio is a Buffer of mp3 data"
10556
- }
10557
- ]
10558
- });
10559
-
10560
10384
  setBuildTimeData('clients.supabase', {
10561
10385
  "id": "clients.supabase",
10562
10386
  "description": "Supabase client for the Luca container system. Wraps the official `@supabase/supabase-js` SDK and exposes it through Luca's typed state, events, and introspection system. The SDK is isomorphic so this single implementation works in both Node and browser containers. Use `client.sdk` for full SDK access, or use the convenience wrappers for common operations (auth, database queries, storage, edge functions, realtime).",
@@ -11005,16 +10829,218 @@ setBuildTimeData('clients.comfyui', {
11005
10829
  ]
11006
10830
  });
11007
10831
 
11008
- setBuildTimeData('servers.mcp', {
11009
- "id": "servers.mcp",
11010
- "description": "MCP (Model Context Protocol) server for exposing tools, resources, and prompts to AI clients like Claude Code. Uses the low-level MCP SDK Server class directly with Zod 4 native JSON Schema conversion. Register tools, resources, and prompts programmatically, then start the server over stdio (for CLI integration) or HTTP (for remote access).",
11011
- "shortcut": "servers.mcp",
11012
- "className": "MCPServer",
10832
+ setBuildTimeData('clients.elevenlabs', {
10833
+ "id": "clients.elevenlabs",
10834
+ "description": "ElevenLabs client — text-to-speech synthesis via the ElevenLabs REST API. Provides methods for listing voices, listing models, and generating speech audio. Audio is returned as a Buffer; use `say()` for a convenience method that writes to disk.",
10835
+ "shortcut": "clients.elevenlabs",
10836
+ "className": "ElevenLabsClient",
11013
10837
  "methods": {
11014
- "tool": {
11015
- "description": "Register an MCP tool. The tool's Zod schema is converted to JSON Schema for the protocol listing, and used for runtime argument validation. Tool handlers can return a string (auto-wrapped as text content) or a full CallToolResult object for advanced responses (images, errors, etc).",
10838
+ "beforeRequest": {
10839
+ "description": "Inject the xi-api-key header before each request.",
10840
+ "parameters": {},
10841
+ "required": [],
10842
+ "returns": "void"
10843
+ },
10844
+ "connect": {
10845
+ "description": "Validate the API key by listing available models.",
10846
+ "parameters": {},
10847
+ "required": [],
10848
+ "returns": "Promise<this>",
10849
+ "examples": [
10850
+ {
10851
+ "language": "ts",
10852
+ "code": "await el.connect()"
10853
+ }
10854
+ ]
10855
+ },
10856
+ "listVoices": {
10857
+ "description": "List available voices with optional search and filtering.",
11016
10858
  "parameters": {
11017
- "name": {
10859
+ "options": {
10860
+ "type": "{\n search?: string\n category?: string\n voice_type?: string\n page_size?: number\n next_page_token?: string\n }",
10861
+ "description": "Query parameters for filtering voices"
10862
+ }
10863
+ },
10864
+ "required": [],
10865
+ "returns": "Promise<any>",
10866
+ "examples": [
10867
+ {
10868
+ "language": "ts",
10869
+ "code": "const voices = await el.listVoices()\nconst premade = await el.listVoices({ category: 'premade' })"
10870
+ }
10871
+ ]
10872
+ },
10873
+ "getVoice": {
10874
+ "description": "Get details for a single voice.",
10875
+ "parameters": {
10876
+ "voiceId": {
10877
+ "type": "string",
10878
+ "description": "The voice ID to look up"
10879
+ }
10880
+ },
10881
+ "required": [
10882
+ "voiceId"
10883
+ ],
10884
+ "returns": "Promise<any>",
10885
+ "examples": [
10886
+ {
10887
+ "language": "ts",
10888
+ "code": "const voice = await el.getVoice('21m00Tcm4TlvDq8ikWAM')\nconsole.log(voice.name, voice.settings)"
10889
+ }
10890
+ ]
10891
+ },
10892
+ "listModels": {
10893
+ "description": "List available TTS models.",
10894
+ "parameters": {},
10895
+ "required": [],
10896
+ "returns": "Promise<any[]>",
10897
+ "examples": [
10898
+ {
10899
+ "language": "ts",
10900
+ "code": "const models = await el.listModels()\nconsole.log(models.map(m => m.model_id))"
10901
+ }
10902
+ ]
10903
+ },
10904
+ "synthesize": {
10905
+ "description": "Synthesize speech from text, returning audio as a Buffer.",
10906
+ "parameters": {
10907
+ "text": {
10908
+ "type": "string",
10909
+ "description": "The text to convert to speech"
10910
+ },
10911
+ "options": {
10912
+ "type": "SynthesizeOptions",
10913
+ "description": "Voice, model, format, and voice settings overrides",
10914
+ "properties": {
10915
+ "voiceId": {
10916
+ "type": "string",
10917
+ "description": ""
10918
+ },
10919
+ "modelId": {
10920
+ "type": "string",
10921
+ "description": ""
10922
+ },
10923
+ "outputFormat": {
10924
+ "type": "string",
10925
+ "description": ""
10926
+ },
10927
+ "voiceSettings": {
10928
+ "type": "ElevenLabsVoiceSettings",
10929
+ "description": ""
10930
+ },
10931
+ "disableCache": {
10932
+ "type": "boolean",
10933
+ "description": ""
10934
+ }
10935
+ }
10936
+ }
10937
+ },
10938
+ "required": [
10939
+ "text"
10940
+ ],
10941
+ "returns": "Promise<Buffer>",
10942
+ "examples": [
10943
+ {
10944
+ "language": "ts",
10945
+ "code": "const audio = await el.synthesize('Hello world')\n// audio is a Buffer of mp3 data\n\nconst custom = await el.synthesize('Hello', {\n voiceId: '21m00Tcm4TlvDq8ikWAM',\n voiceSettings: { stability: 0.5, similarityBoost: 0.8 }\n})"
10946
+ }
10947
+ ]
10948
+ },
10949
+ "say": {
10950
+ "description": "Synthesize speech and write the audio to a file.",
10951
+ "parameters": {
10952
+ "text": {
10953
+ "type": "string",
10954
+ "description": "The text to convert to speech"
10955
+ },
10956
+ "outputPath": {
10957
+ "type": "string",
10958
+ "description": "File path to write the audio to"
10959
+ },
10960
+ "options": {
10961
+ "type": "SynthesizeOptions",
10962
+ "description": "Voice, model, format, and voice settings overrides",
10963
+ "properties": {
10964
+ "voiceId": {
10965
+ "type": "string",
10966
+ "description": ""
10967
+ },
10968
+ "modelId": {
10969
+ "type": "string",
10970
+ "description": ""
10971
+ },
10972
+ "outputFormat": {
10973
+ "type": "string",
10974
+ "description": ""
10975
+ },
10976
+ "voiceSettings": {
10977
+ "type": "ElevenLabsVoiceSettings",
10978
+ "description": ""
10979
+ },
10980
+ "disableCache": {
10981
+ "type": "boolean",
10982
+ "description": ""
10983
+ }
10984
+ }
10985
+ }
10986
+ },
10987
+ "required": [
10988
+ "text",
10989
+ "outputPath"
10990
+ ],
10991
+ "returns": "Promise<string>",
10992
+ "examples": [
10993
+ {
10994
+ "language": "ts",
10995
+ "code": "const path = await el.say('Hello world', './hello.mp3')\nconsole.log(`Audio saved to ${path}`)"
10996
+ }
10997
+ ]
10998
+ }
10999
+ },
11000
+ "getters": {
11001
+ "apiKey": {
11002
+ "description": "The resolved API key from options or environment.",
11003
+ "returns": "string"
11004
+ }
11005
+ },
11006
+ "events": {
11007
+ "failure": {
11008
+ "name": "failure",
11009
+ "description": "Event emitted by ElevenLabsClient",
11010
+ "arguments": {}
11011
+ },
11012
+ "voices": {
11013
+ "name": "voices",
11014
+ "description": "Event emitted by ElevenLabsClient",
11015
+ "arguments": {}
11016
+ },
11017
+ "speech": {
11018
+ "name": "speech",
11019
+ "description": "Event emitted by ElevenLabsClient",
11020
+ "arguments": {}
11021
+ }
11022
+ },
11023
+ "state": {},
11024
+ "options": {},
11025
+ "envVars": [],
11026
+ "examples": [
11027
+ {
11028
+ "language": "ts",
11029
+ "code": "const el = container.client('elevenlabs')\nawait el.connect()\nconst voices = await el.listVoices()\nconst audio = await el.synthesize('Hello world')\n// audio is a Buffer of mp3 data"
11030
+ }
11031
+ ]
11032
+ });
11033
+
11034
+ setBuildTimeData('servers.mcp', {
11035
+ "id": "servers.mcp",
11036
+ "description": "MCP (Model Context Protocol) server for exposing tools, resources, and prompts to AI clients like Claude Code. Uses the low-level MCP SDK Server class directly with Zod 4 native JSON Schema conversion. Register tools, resources, and prompts programmatically, then start the server over stdio (for CLI integration) or HTTP (for remote access).",
11037
+ "shortcut": "servers.mcp",
11038
+ "className": "MCPServer",
11039
+ "methods": {
11040
+ "tool": {
11041
+ "description": "Register an MCP tool. The tool's Zod schema is converted to JSON Schema for the protocol listing, and used for runtime argument validation. Tool handlers can return a string (auto-wrapped as text content) or a full CallToolResult object for advanced responses (images, errors, etc).",
11042
+ "parameters": {
11043
+ "name": {
11018
11044
  "type": "string",
11019
11045
  "description": "Unique tool name"
11020
11046
  },
@@ -17612,6 +17638,32 @@ export const introspectionData = [
17612
17638
  }
17613
17639
  ]
17614
17640
  },
17641
+ "isSymlink": {
17642
+ "description": "Checks if a path is a symbolic link.",
17643
+ "parameters": {
17644
+ "path": {
17645
+ "type": "string",
17646
+ "description": "The path to check"
17647
+ }
17648
+ },
17649
+ "required": [
17650
+ "path"
17651
+ ],
17652
+ "returns": "boolean"
17653
+ },
17654
+ "realpath": {
17655
+ "description": "Resolves a symlink to its real path. Returns the resolved path as-is if not a symlink.",
17656
+ "parameters": {
17657
+ "path": {
17658
+ "type": "string",
17659
+ "description": "The path to resolve"
17660
+ }
17661
+ },
17662
+ "required": [
17663
+ "path"
17664
+ ],
17665
+ "returns": "string"
17666
+ },
17615
17667
  "stat": {
17616
17668
  "description": "Synchronously returns the stat object for a file or directory.",
17617
17669
  "parameters": {
@@ -22175,224 +22227,23 @@ export const introspectionData = [
22175
22227
  ]
22176
22228
  },
22177
22229
  {
22178
- "id": "clients.elevenlabs",
22179
- "description": "ElevenLabs client — text-to-speech synthesis via the ElevenLabs REST API. Provides methods for listing voices, listing models, and generating speech audio. Audio is returned as a Buffer; use `say()` for a convenience method that writes to disk.",
22180
- "shortcut": "clients.elevenlabs",
22181
- "className": "ElevenLabsClient",
22230
+ "id": "clients.supabase",
22231
+ "description": "Supabase client for the Luca container system. Wraps the official `@supabase/supabase-js` SDK and exposes it through Luca's typed state, events, and introspection system. The SDK is isomorphic so this single implementation works in both Node and browser containers. Use `client.sdk` for full SDK access, or use the convenience wrappers for common operations (auth, database queries, storage, edge functions, realtime).",
22232
+ "shortcut": "clients.supabase",
22233
+ "className": "SupabaseClient",
22182
22234
  "methods": {
22183
- "beforeRequest": {
22184
- "description": "Inject the xi-api-key header before each request.",
22185
- "parameters": {},
22186
- "required": [],
22187
- "returns": "void"
22188
- },
22189
- "connect": {
22190
- "description": "Validate the API key by listing available models.",
22191
- "parameters": {},
22192
- "required": [],
22193
- "returns": "Promise<this>",
22194
- "examples": [
22195
- {
22196
- "language": "ts",
22197
- "code": "await el.connect()"
22198
- }
22199
- ]
22200
- },
22201
- "listVoices": {
22202
- "description": "List available voices with optional search and filtering.",
22235
+ "from": {
22236
+ "description": "Start a query on a Postgres table or view.",
22203
22237
  "parameters": {
22204
- "options": {
22205
- "type": "{\n search?: string\n category?: string\n voice_type?: string\n page_size?: number\n next_page_token?: string\n }",
22206
- "description": "Query parameters for filtering voices"
22238
+ "table": {
22239
+ "type": "string",
22240
+ "description": "The table or view name to query"
22207
22241
  }
22208
22242
  },
22209
- "required": [],
22210
- "returns": "Promise<any>",
22211
- "examples": [
22212
- {
22213
- "language": "ts",
22214
- "code": "const voices = await el.listVoices()\nconst premade = await el.listVoices({ category: 'premade' })"
22215
- }
22216
- ]
22217
- },
22218
- "getVoice": {
22219
- "description": "Get details for a single voice.",
22220
- "parameters": {
22221
- "voiceId": {
22222
- "type": "string",
22223
- "description": "The voice ID to look up"
22224
- }
22225
- },
22226
- "required": [
22227
- "voiceId"
22228
- ],
22229
- "returns": "Promise<any>",
22230
- "examples": [
22231
- {
22232
- "language": "ts",
22233
- "code": "const voice = await el.getVoice('21m00Tcm4TlvDq8ikWAM')\nconsole.log(voice.name, voice.settings)"
22234
- }
22235
- ]
22236
- },
22237
- "listModels": {
22238
- "description": "List available TTS models.",
22239
- "parameters": {},
22240
- "required": [],
22241
- "returns": "Promise<any[]>",
22242
- "examples": [
22243
- {
22244
- "language": "ts",
22245
- "code": "const models = await el.listModels()\nconsole.log(models.map(m => m.model_id))"
22246
- }
22247
- ]
22248
- },
22249
- "synthesize": {
22250
- "description": "Synthesize speech from text, returning audio as a Buffer.",
22251
- "parameters": {
22252
- "text": {
22253
- "type": "string",
22254
- "description": "The text to convert to speech"
22255
- },
22256
- "options": {
22257
- "type": "SynthesizeOptions",
22258
- "description": "Voice, model, format, and voice settings overrides",
22259
- "properties": {
22260
- "voiceId": {
22261
- "type": "string",
22262
- "description": ""
22263
- },
22264
- "modelId": {
22265
- "type": "string",
22266
- "description": ""
22267
- },
22268
- "outputFormat": {
22269
- "type": "string",
22270
- "description": ""
22271
- },
22272
- "voiceSettings": {
22273
- "type": "ElevenLabsVoiceSettings",
22274
- "description": ""
22275
- },
22276
- "disableCache": {
22277
- "type": "boolean",
22278
- "description": ""
22279
- }
22280
- }
22281
- }
22282
- },
22283
- "required": [
22284
- "text"
22285
- ],
22286
- "returns": "Promise<Buffer>",
22287
- "examples": [
22288
- {
22289
- "language": "ts",
22290
- "code": "const audio = await el.synthesize('Hello world')\n// audio is a Buffer of mp3 data\n\nconst custom = await el.synthesize('Hello', {\n voiceId: '21m00Tcm4TlvDq8ikWAM',\n voiceSettings: { stability: 0.5, similarityBoost: 0.8 }\n})"
22291
- }
22292
- ]
22293
- },
22294
- "say": {
22295
- "description": "Synthesize speech and write the audio to a file.",
22296
- "parameters": {
22297
- "text": {
22298
- "type": "string",
22299
- "description": "The text to convert to speech"
22300
- },
22301
- "outputPath": {
22302
- "type": "string",
22303
- "description": "File path to write the audio to"
22304
- },
22305
- "options": {
22306
- "type": "SynthesizeOptions",
22307
- "description": "Voice, model, format, and voice settings overrides",
22308
- "properties": {
22309
- "voiceId": {
22310
- "type": "string",
22311
- "description": ""
22312
- },
22313
- "modelId": {
22314
- "type": "string",
22315
- "description": ""
22316
- },
22317
- "outputFormat": {
22318
- "type": "string",
22319
- "description": ""
22320
- },
22321
- "voiceSettings": {
22322
- "type": "ElevenLabsVoiceSettings",
22323
- "description": ""
22324
- },
22325
- "disableCache": {
22326
- "type": "boolean",
22327
- "description": ""
22328
- }
22329
- }
22330
- }
22331
- },
22332
- "required": [
22333
- "text",
22334
- "outputPath"
22335
- ],
22336
- "returns": "Promise<string>",
22337
- "examples": [
22338
- {
22339
- "language": "ts",
22340
- "code": "const path = await el.say('Hello world', './hello.mp3')\nconsole.log(`Audio saved to ${path}`)"
22341
- }
22342
- ]
22343
- }
22344
- },
22345
- "getters": {
22346
- "apiKey": {
22347
- "description": "The resolved API key from options or environment.",
22348
- "returns": "string"
22349
- }
22350
- },
22351
- "events": {
22352
- "failure": {
22353
- "name": "failure",
22354
- "description": "Event emitted by ElevenLabsClient",
22355
- "arguments": {}
22356
- },
22357
- "voices": {
22358
- "name": "voices",
22359
- "description": "Event emitted by ElevenLabsClient",
22360
- "arguments": {}
22361
- },
22362
- "speech": {
22363
- "name": "speech",
22364
- "description": "Event emitted by ElevenLabsClient",
22365
- "arguments": {}
22366
- }
22367
- },
22368
- "state": {},
22369
- "options": {},
22370
- "envVars": [],
22371
- "examples": [
22372
- {
22373
- "language": "ts",
22374
- "code": "const el = container.client('elevenlabs')\nawait el.connect()\nconst voices = await el.listVoices()\nconst audio = await el.synthesize('Hello world')\n// audio is a Buffer of mp3 data"
22375
- }
22376
- ]
22377
- },
22378
- {
22379
- "id": "clients.supabase",
22380
- "description": "Supabase client for the Luca container system. Wraps the official `@supabase/supabase-js` SDK and exposes it through Luca's typed state, events, and introspection system. The SDK is isomorphic so this single implementation works in both Node and browser containers. Use `client.sdk` for full SDK access, or use the convenience wrappers for common operations (auth, database queries, storage, edge functions, realtime).",
22381
- "shortcut": "clients.supabase",
22382
- "className": "SupabaseClient",
22383
- "methods": {
22384
- "from": {
22385
- "description": "Start a query on a Postgres table or view.",
22386
- "parameters": {
22387
- "table": {
22388
- "type": "string",
22389
- "description": "The table or view name to query"
22390
- }
22391
- },
22392
- "required": [
22393
- "table"
22394
- ],
22395
- "returns": "void"
22243
+ "required": [
22244
+ "table"
22245
+ ],
22246
+ "returns": "void"
22396
22247
  },
22397
22248
  "rpc": {
22398
22249
  "description": "Call a Postgres function (RPC).",
@@ -22821,6 +22672,207 @@ export const introspectionData = [
22821
22672
  }
22822
22673
  ]
22823
22674
  },
22675
+ {
22676
+ "id": "clients.elevenlabs",
22677
+ "description": "ElevenLabs client — text-to-speech synthesis via the ElevenLabs REST API. Provides methods for listing voices, listing models, and generating speech audio. Audio is returned as a Buffer; use `say()` for a convenience method that writes to disk.",
22678
+ "shortcut": "clients.elevenlabs",
22679
+ "className": "ElevenLabsClient",
22680
+ "methods": {
22681
+ "beforeRequest": {
22682
+ "description": "Inject the xi-api-key header before each request.",
22683
+ "parameters": {},
22684
+ "required": [],
22685
+ "returns": "void"
22686
+ },
22687
+ "connect": {
22688
+ "description": "Validate the API key by listing available models.",
22689
+ "parameters": {},
22690
+ "required": [],
22691
+ "returns": "Promise<this>",
22692
+ "examples": [
22693
+ {
22694
+ "language": "ts",
22695
+ "code": "await el.connect()"
22696
+ }
22697
+ ]
22698
+ },
22699
+ "listVoices": {
22700
+ "description": "List available voices with optional search and filtering.",
22701
+ "parameters": {
22702
+ "options": {
22703
+ "type": "{\n search?: string\n category?: string\n voice_type?: string\n page_size?: number\n next_page_token?: string\n }",
22704
+ "description": "Query parameters for filtering voices"
22705
+ }
22706
+ },
22707
+ "required": [],
22708
+ "returns": "Promise<any>",
22709
+ "examples": [
22710
+ {
22711
+ "language": "ts",
22712
+ "code": "const voices = await el.listVoices()\nconst premade = await el.listVoices({ category: 'premade' })"
22713
+ }
22714
+ ]
22715
+ },
22716
+ "getVoice": {
22717
+ "description": "Get details for a single voice.",
22718
+ "parameters": {
22719
+ "voiceId": {
22720
+ "type": "string",
22721
+ "description": "The voice ID to look up"
22722
+ }
22723
+ },
22724
+ "required": [
22725
+ "voiceId"
22726
+ ],
22727
+ "returns": "Promise<any>",
22728
+ "examples": [
22729
+ {
22730
+ "language": "ts",
22731
+ "code": "const voice = await el.getVoice('21m00Tcm4TlvDq8ikWAM')\nconsole.log(voice.name, voice.settings)"
22732
+ }
22733
+ ]
22734
+ },
22735
+ "listModels": {
22736
+ "description": "List available TTS models.",
22737
+ "parameters": {},
22738
+ "required": [],
22739
+ "returns": "Promise<any[]>",
22740
+ "examples": [
22741
+ {
22742
+ "language": "ts",
22743
+ "code": "const models = await el.listModels()\nconsole.log(models.map(m => m.model_id))"
22744
+ }
22745
+ ]
22746
+ },
22747
+ "synthesize": {
22748
+ "description": "Synthesize speech from text, returning audio as a Buffer.",
22749
+ "parameters": {
22750
+ "text": {
22751
+ "type": "string",
22752
+ "description": "The text to convert to speech"
22753
+ },
22754
+ "options": {
22755
+ "type": "SynthesizeOptions",
22756
+ "description": "Voice, model, format, and voice settings overrides",
22757
+ "properties": {
22758
+ "voiceId": {
22759
+ "type": "string",
22760
+ "description": ""
22761
+ },
22762
+ "modelId": {
22763
+ "type": "string",
22764
+ "description": ""
22765
+ },
22766
+ "outputFormat": {
22767
+ "type": "string",
22768
+ "description": ""
22769
+ },
22770
+ "voiceSettings": {
22771
+ "type": "ElevenLabsVoiceSettings",
22772
+ "description": ""
22773
+ },
22774
+ "disableCache": {
22775
+ "type": "boolean",
22776
+ "description": ""
22777
+ }
22778
+ }
22779
+ }
22780
+ },
22781
+ "required": [
22782
+ "text"
22783
+ ],
22784
+ "returns": "Promise<Buffer>",
22785
+ "examples": [
22786
+ {
22787
+ "language": "ts",
22788
+ "code": "const audio = await el.synthesize('Hello world')\n// audio is a Buffer of mp3 data\n\nconst custom = await el.synthesize('Hello', {\n voiceId: '21m00Tcm4TlvDq8ikWAM',\n voiceSettings: { stability: 0.5, similarityBoost: 0.8 }\n})"
22789
+ }
22790
+ ]
22791
+ },
22792
+ "say": {
22793
+ "description": "Synthesize speech and write the audio to a file.",
22794
+ "parameters": {
22795
+ "text": {
22796
+ "type": "string",
22797
+ "description": "The text to convert to speech"
22798
+ },
22799
+ "outputPath": {
22800
+ "type": "string",
22801
+ "description": "File path to write the audio to"
22802
+ },
22803
+ "options": {
22804
+ "type": "SynthesizeOptions",
22805
+ "description": "Voice, model, format, and voice settings overrides",
22806
+ "properties": {
22807
+ "voiceId": {
22808
+ "type": "string",
22809
+ "description": ""
22810
+ },
22811
+ "modelId": {
22812
+ "type": "string",
22813
+ "description": ""
22814
+ },
22815
+ "outputFormat": {
22816
+ "type": "string",
22817
+ "description": ""
22818
+ },
22819
+ "voiceSettings": {
22820
+ "type": "ElevenLabsVoiceSettings",
22821
+ "description": ""
22822
+ },
22823
+ "disableCache": {
22824
+ "type": "boolean",
22825
+ "description": ""
22826
+ }
22827
+ }
22828
+ }
22829
+ },
22830
+ "required": [
22831
+ "text",
22832
+ "outputPath"
22833
+ ],
22834
+ "returns": "Promise<string>",
22835
+ "examples": [
22836
+ {
22837
+ "language": "ts",
22838
+ "code": "const path = await el.say('Hello world', './hello.mp3')\nconsole.log(`Audio saved to ${path}`)"
22839
+ }
22840
+ ]
22841
+ }
22842
+ },
22843
+ "getters": {
22844
+ "apiKey": {
22845
+ "description": "The resolved API key from options or environment.",
22846
+ "returns": "string"
22847
+ }
22848
+ },
22849
+ "events": {
22850
+ "failure": {
22851
+ "name": "failure",
22852
+ "description": "Event emitted by ElevenLabsClient",
22853
+ "arguments": {}
22854
+ },
22855
+ "voices": {
22856
+ "name": "voices",
22857
+ "description": "Event emitted by ElevenLabsClient",
22858
+ "arguments": {}
22859
+ },
22860
+ "speech": {
22861
+ "name": "speech",
22862
+ "description": "Event emitted by ElevenLabsClient",
22863
+ "arguments": {}
22864
+ }
22865
+ },
22866
+ "state": {},
22867
+ "options": {},
22868
+ "envVars": [],
22869
+ "examples": [
22870
+ {
22871
+ "language": "ts",
22872
+ "code": "const el = container.client('elevenlabs')\nawait el.connect()\nconst voices = await el.listVoices()\nconst audio = await el.synthesize('Hello world')\n// audio is a Buffer of mp3 data"
22873
+ }
22874
+ ]
22875
+ },
22824
22876
  {
22825
22877
  "id": "servers.mcp",
22826
22878
  "description": "MCP (Model Context Protocol) server for exposing tools, resources, and prompts to AI clients like Claude Code. Uses the low-level MCP SDK Server class directly with Zod 4 native JSON Schema conversion. Register tools, resources, and prompts programmatically, then start the server over stdio (for CLI integration) or HTTP (for remote access).",