@nordsym/apiclaw 1.4.4 → 1.4.6

This diff shows the changes between publicly released versions of a package as they appear in a supported public registry. It is provided for informational purposes only.
package/src/execute.ts CHANGED
@@ -1204,6 +1204,446 @@ const handlers: Record<string, Record<string, (params: any, creds: any) => Promi
1204
1204
  }
1205
1205
  },
1206
1206
  },
1207
+
1208
+ // Groq - Ultra-fast LLM inference
1209
+ groq: {
1210
+ chat: async (params, creds) => {
1211
+ const { messages, model = 'llama3-8b-8192', max_tokens = 1024 } = params;
1212
+
1213
+ if (!messages || !Array.isArray(messages)) {
1214
+ return createErrorResult('groq', 'chat', 'Missing required param: messages (array)', ERROR_CODES.INVALID_PARAMS);
1215
+ }
1216
+
1217
+ const response = await fetchWithRetry('https://api.groq.com/openai/v1/chat/completions', {
1218
+ method: 'POST',
1219
+ headers: {
1220
+ 'Authorization': `Bearer ${creds.api_key}`,
1221
+ 'Content-Type': 'application/json',
1222
+ },
1223
+ body: JSON.stringify({ model, messages, max_tokens }),
1224
+ }, { provider: 'groq', action: 'chat' });
1225
+
1226
+ const data = await response.json() as Record<string, unknown>;
1227
+
1228
+ if (!response.ok) {
1229
+ const err = data.error as Record<string, unknown> | undefined;
1230
+ return createErrorResult('groq', 'chat', (err?.message as string) || 'Chat failed', statusToErrorCode(response.status));
1231
+ }
1232
+
1233
+ const choices = data.choices as Array<Record<string, unknown>> | undefined;
1234
+ const message = choices?.[0]?.message as Record<string, unknown> | undefined;
1235
+
1236
+ return {
1237
+ success: true,
1238
+ provider: 'groq',
1239
+ action: 'chat',
1240
+ data: {
1241
+ content: message?.content,
1242
+ model: data.model,
1243
+ usage: data.usage,
1244
+ },
1245
+ };
1246
+ },
1247
+ },
1248
+
1249
+ // Deepgram - Speech-to-text transcription
1250
+ deepgram: {
1251
+ transcribe: async (params, creds) => {
1252
+ const { url, model = 'nova-2', language = 'en' } = params;
1253
+
1254
+ if (!url) {
1255
+ return createErrorResult('deepgram', 'transcribe', 'Missing required param: url (audio file URL)', ERROR_CODES.INVALID_PARAMS);
1256
+ }
1257
+
1258
+ const response = await fetchWithRetry(`https://api.deepgram.com/v1/listen?model=${model}&language=${language}&smart_format=true`, {
1259
+ method: 'POST',
1260
+ headers: {
1261
+ 'Authorization': `Token ${creds.api_key}`,
1262
+ 'Content-Type': 'application/json',
1263
+ },
1264
+ body: JSON.stringify({ url }),
1265
+ }, { provider: 'deepgram', action: 'transcribe' });
1266
+
1267
+ const data = await response.json() as Record<string, unknown>;
1268
+
1269
+ if (!response.ok) {
1270
+ return createErrorResult('deepgram', 'transcribe', (data.err_msg as string) || 'Transcription failed', statusToErrorCode(response.status));
1271
+ }
1272
+
1273
+ const results = data.results as Record<string, unknown> | undefined;
1274
+ const channels = results?.channels as Array<Record<string, unknown>> | undefined;
1275
+ const alternatives = channels?.[0]?.alternatives as Array<Record<string, unknown>> | undefined;
1276
+ const transcript = alternatives?.[0]?.transcript as string | undefined;
1277
+
1278
+ return {
1279
+ success: true,
1280
+ provider: 'deepgram',
1281
+ action: 'transcribe',
1282
+ data: {
1283
+ transcript,
1284
+ confidence: alternatives?.[0]?.confidence,
1285
+ duration: (data.metadata as Record<string, unknown> | undefined)?.duration,
1286
+ },
1287
+ };
1288
+ },
1289
+ },
1290
+
1291
+ // Serper - Google Search API for AI
1292
+ serper: {
1293
+ search: async (params, creds) => {
1294
+ const { query, num = 10, gl = 'us', hl = 'en' } = params;
1295
+
1296
+ if (!query) {
1297
+ return createErrorResult('serper', 'search', 'Missing required param: query', ERROR_CODES.INVALID_PARAMS);
1298
+ }
1299
+
1300
+ const response = await fetchWithRetry('https://google.serper.dev/search', {
1301
+ method: 'POST',
1302
+ headers: {
1303
+ 'X-API-KEY': creds.api_key,
1304
+ 'Content-Type': 'application/json',
1305
+ },
1306
+ body: JSON.stringify({ q: query, num, gl, hl }),
1307
+ }, { provider: 'serper', action: 'search' });
1308
+
1309
+ const data = await response.json() as Record<string, unknown>;
1310
+
1311
+ if (!response.ok) {
1312
+ return createErrorResult('serper', 'search', (data.message as string) || 'Search failed', statusToErrorCode(response.status));
1313
+ }
1314
+
1315
+ const organic = (data.organic as Array<Record<string, unknown>>) || [];
1316
+
1317
+ return {
1318
+ success: true,
1319
+ provider: 'serper',
1320
+ action: 'search',
1321
+ data: {
1322
+ query,
1323
+ results: organic.map(r => ({
1324
+ title: r.title,
1325
+ url: r.link,
1326
+ snippet: r.snippet,
1327
+ position: r.position,
1328
+ })),
1329
+ total: organic.length,
1330
+ answerBox: data.answerBox,
1331
+ knowledgeGraph: data.knowledgeGraph,
1332
+ },
1333
+ };
1334
+ },
1335
+ },
1336
+
1337
+ // Mistral - Open-weight LLMs
1338
+ mistral: {
1339
+ chat: async (params, creds) => {
1340
+ const { messages, model = 'mistral-small-latest', max_tokens = 1024 } = params;
1341
+
1342
+ if (!messages || !Array.isArray(messages)) {
1343
+ return createErrorResult('mistral', 'chat', 'Missing required param: messages (array)', ERROR_CODES.INVALID_PARAMS);
1344
+ }
1345
+
1346
+ const response = await fetchWithRetry('https://api.mistral.ai/v1/chat/completions', {
1347
+ method: 'POST',
1348
+ headers: {
1349
+ 'Authorization': `Bearer ${creds.api_key}`,
1350
+ 'Content-Type': 'application/json',
1351
+ },
1352
+ body: JSON.stringify({ model, messages, max_tokens }),
1353
+ }, { provider: 'mistral', action: 'chat' });
1354
+
1355
+ const data = await response.json() as Record<string, unknown>;
1356
+
1357
+ if (!response.ok) {
1358
+ const err = data.message as string | undefined;
1359
+ return createErrorResult('mistral', 'chat', err || 'Chat failed', statusToErrorCode(response.status));
1360
+ }
1361
+
1362
+ const choices = data.choices as Array<Record<string, unknown>> | undefined;
1363
+ const message = choices?.[0]?.message as Record<string, unknown> | undefined;
1364
+
1365
+ return {
1366
+ success: true,
1367
+ provider: 'mistral',
1368
+ action: 'chat',
1369
+ data: {
1370
+ content: message?.content,
1371
+ model: data.model,
1372
+ usage: data.usage,
1373
+ },
1374
+ };
1375
+ },
1376
+
1377
+ embed: async (params, creds) => {
1378
+ const { input, model = 'mistral-embed' } = params;
1379
+
1380
+ if (!input) {
1381
+ return createErrorResult('mistral', 'embed', 'Missing required param: input (string or array)', ERROR_CODES.INVALID_PARAMS);
1382
+ }
1383
+
1384
+ const inputs = Array.isArray(input) ? input : [input];
1385
+
1386
+ const response = await fetchWithRetry('https://api.mistral.ai/v1/embeddings', {
1387
+ method: 'POST',
1388
+ headers: {
1389
+ 'Authorization': `Bearer ${creds.api_key}`,
1390
+ 'Content-Type': 'application/json',
1391
+ },
1392
+ body: JSON.stringify({ model, input: inputs }),
1393
+ }, { provider: 'mistral', action: 'embed' });
1394
+
1395
+ const data = await response.json() as Record<string, unknown>;
1396
+
1397
+ if (!response.ok) {
1398
+ return createErrorResult('mistral', 'embed', (data.message as string) || 'Embedding failed', statusToErrorCode(response.status));
1399
+ }
1400
+
1401
+ const embedData = data.data as Array<Record<string, unknown>> | undefined;
1402
+
1403
+ return {
1404
+ success: true,
1405
+ provider: 'mistral',
1406
+ action: 'embed',
1407
+ data: {
1408
+ embeddings: embedData?.map(d => d.embedding),
1409
+ model: data.model,
1410
+ usage: data.usage,
1411
+ },
1412
+ };
1413
+ },
1414
+ },
1415
+
1416
+ // Cohere - Enterprise NLP and embeddings
1417
+ cohere: {
1418
+ chat: async (params, creds) => {
1419
+ const { message, model = 'command-r', max_tokens = 1024, preamble } = params;
1420
+
1421
+ if (!message) {
1422
+ return createErrorResult('cohere', 'chat', 'Missing required param: message', ERROR_CODES.INVALID_PARAMS);
1423
+ }
1424
+
1425
+ const body: Record<string, unknown> = { model, message, max_tokens };
1426
+ if (preamble) body.preamble = preamble;
1427
+
1428
+ const response = await fetchWithRetry('https://api.cohere.com/v1/chat', {
1429
+ method: 'POST',
1430
+ headers: {
1431
+ 'Authorization': `Bearer ${creds.api_key}`,
1432
+ 'Content-Type': 'application/json',
1433
+ },
1434
+ body: JSON.stringify(body),
1435
+ }, { provider: 'cohere', action: 'chat' });
1436
+
1437
+ const data = await response.json() as Record<string, unknown>;
1438
+
1439
+ if (!response.ok) {
1440
+ return createErrorResult('cohere', 'chat', (data.message as string) || 'Chat failed', statusToErrorCode(response.status));
1441
+ }
1442
+
1443
+ return {
1444
+ success: true,
1445
+ provider: 'cohere',
1446
+ action: 'chat',
1447
+ data: {
1448
+ content: data.text,
1449
+ generation_id: data.generation_id,
1450
+ usage: data.meta,
1451
+ },
1452
+ };
1453
+ },
1454
+
1455
+ embed: async (params, creds) => {
1456
+ const { texts, model = 'embed-english-v3.0', input_type = 'search_document' } = params;
1457
+
1458
+ if (!texts || !Array.isArray(texts)) {
1459
+ return createErrorResult('cohere', 'embed', 'Missing required param: texts (array of strings)', ERROR_CODES.INVALID_PARAMS);
1460
+ }
1461
+
1462
+ const response = await fetchWithRetry('https://api.cohere.com/v1/embed', {
1463
+ method: 'POST',
1464
+ headers: {
1465
+ 'Authorization': `Bearer ${creds.api_key}`,
1466
+ 'Content-Type': 'application/json',
1467
+ },
1468
+ body: JSON.stringify({ model, texts, input_type }),
1469
+ }, { provider: 'cohere', action: 'embed' });
1470
+
1471
+ const data = await response.json() as Record<string, unknown>;
1472
+
1473
+ if (!response.ok) {
1474
+ return createErrorResult('cohere', 'embed', (data.message as string) || 'Embedding failed', statusToErrorCode(response.status));
1475
+ }
1476
+
1477
+ return {
1478
+ success: true,
1479
+ provider: 'cohere',
1480
+ action: 'embed',
1481
+ data: {
1482
+ embeddings: data.embeddings,
1483
+ model: data.model,
1484
+ },
1485
+ };
1486
+ },
1487
+ },
1488
+
1489
+ // Together AI - Open-source model inference
1490
+ together_ai: {
1491
+ chat: async (params, creds) => {
1492
+ const { messages, model = 'meta-llama/Llama-3-8b-chat-hf', max_tokens = 1024 } = params;
1493
+
1494
+ if (!messages || !Array.isArray(messages)) {
1495
+ return createErrorResult('together_ai', 'chat', 'Missing required param: messages (array)', ERROR_CODES.INVALID_PARAMS);
1496
+ }
1497
+
1498
+ const response = await fetchWithRetry('https://api.together.xyz/v1/chat/completions', {
1499
+ method: 'POST',
1500
+ headers: {
1501
+ 'Authorization': `Bearer ${creds.api_key}`,
1502
+ 'Content-Type': 'application/json',
1503
+ },
1504
+ body: JSON.stringify({ model, messages, max_tokens }),
1505
+ }, { provider: 'together_ai', action: 'chat' });
1506
+
1507
+ const data = await response.json() as Record<string, unknown>;
1508
+
1509
+ if (!response.ok) {
1510
+ const err = data.error as Record<string, unknown> | undefined;
1511
+ return createErrorResult('together_ai', 'chat', (err?.message as string) || 'Chat failed', statusToErrorCode(response.status));
1512
+ }
1513
+
1514
+ const choices = data.choices as Array<Record<string, unknown>> | undefined;
1515
+ const message = choices?.[0]?.message as Record<string, unknown> | undefined;
1516
+
1517
+ return {
1518
+ success: true,
1519
+ provider: 'together_ai',
1520
+ action: 'chat',
1521
+ data: {
1522
+ content: message?.content,
1523
+ model: data.model,
1524
+ usage: data.usage,
1525
+ },
1526
+ };
1527
+ },
1528
+ },
1529
+
1530
+ // Stability AI - Image generation
1531
+ stability_ai: {
1532
+ generate_image: async (params, creds) => {
1533
+ const { prompt, model = 'stable-diffusion-xl-1024-v1-0', width = 1024, height = 1024, steps = 30 } = params;
1534
+
1535
+ if (!prompt) {
1536
+ return createErrorResult('stability_ai', 'generate_image', 'Missing required param: prompt', ERROR_CODES.INVALID_PARAMS);
1537
+ }
1538
+
1539
+ const response = await fetchWithRetry(`https://api.stability.ai/v1/generation/${model}/text-to-image`, {
1540
+ method: 'POST',
1541
+ headers: {
1542
+ 'Authorization': `Bearer ${creds.api_key}`,
1543
+ 'Content-Type': 'application/json',
1544
+ 'Accept': 'application/json',
1545
+ },
1546
+ body: JSON.stringify({
1547
+ text_prompts: [{ text: prompt, weight: 1 }],
1548
+ width,
1549
+ height,
1550
+ steps,
1551
+ samples: 1,
1552
+ }),
1553
+ }, { provider: 'stability_ai', action: 'generate_image' });
1554
+
1555
+ const data = await response.json() as Record<string, unknown>;
1556
+
1557
+ if (!response.ok) {
1558
+ return createErrorResult('stability_ai', 'generate_image', (data.message as string) || 'Image generation failed', statusToErrorCode(response.status));
1559
+ }
1560
+
1561
+ const artifacts = data.artifacts as Array<Record<string, unknown>> | undefined;
1562
+ const image = artifacts?.[0];
1563
+
1564
+ return {
1565
+ success: true,
1566
+ provider: 'stability_ai',
1567
+ action: 'generate_image',
1568
+ data: {
1569
+ image_base64: image?.base64,
1570
+ finish_reason: image?.finishReason,
1571
+ seed: image?.seed,
1572
+ },
1573
+ };
1574
+ },
1575
+ },
1576
+
1577
+ // AssemblyAI - Audio transcription and intelligence
1578
+ assemblyai: {
1579
+ transcribe: async (params, creds) => {
1580
+ const { audio_url, language_code = 'en', speaker_labels = false, sentiment_analysis = false } = params;
1581
+
1582
+ if (!audio_url) {
1583
+ return createErrorResult('assemblyai', 'transcribe', 'Missing required param: audio_url', ERROR_CODES.INVALID_PARAMS);
1584
+ }
1585
+
1586
+ // Submit transcription job
1587
+ const submitResponse = await fetchWithRetry('https://api.assemblyai.com/v2/transcript', {
1588
+ method: 'POST',
1589
+ headers: {
1590
+ 'Authorization': creds.api_key,
1591
+ 'Content-Type': 'application/json',
1592
+ },
1593
+ body: JSON.stringify({ audio_url, language_code, speaker_labels, sentiment_analysis }),
1594
+ }, { provider: 'assemblyai', action: 'transcribe' });
1595
+
1596
+ const submitData = await submitResponse.json() as Record<string, unknown>;
1597
+
1598
+ if (!submitResponse.ok) {
1599
+ return createErrorResult('assemblyai', 'transcribe', (submitData.error as string) || 'Submit failed', statusToErrorCode(submitResponse.status));
1600
+ }
1601
+
1602
+ const transcriptId = submitData.id as string;
1603
+
1604
+ // Poll until complete (max 120 seconds)
1605
+ const startTime = Date.now();
1606
+ while (Date.now() - startTime < 120000) {
1607
+ await sleep(3000);
1608
+
1609
+ const pollResponse = await fetchWithRetry(`https://api.assemblyai.com/v2/transcript/${transcriptId}`, {
1610
+ headers: { 'Authorization': creds.api_key },
1611
+ }, { provider: 'assemblyai', action: 'transcribe_poll' });
1612
+
1613
+ const pollData = await pollResponse.json() as Record<string, unknown>;
1614
+
1615
+ if (pollData.status === 'completed') {
1616
+ return {
1617
+ success: true,
1618
+ provider: 'assemblyai',
1619
+ action: 'transcribe',
1620
+ data: {
1621
+ transcript: pollData.text,
1622
+ words: pollData.words,
1623
+ utterances: pollData.utterances,
1624
+ sentiment_analysis_results: pollData.sentiment_analysis_results,
1625
+ audio_duration: pollData.audio_duration,
1626
+ },
1627
+ };
1628
+ }
1629
+
1630
+ if (pollData.status === 'error') {
1631
+ return createErrorResult('assemblyai', 'transcribe', (pollData.error as string) || 'Transcription failed', ERROR_CODES.PROVIDER_ERROR);
1632
+ }
1633
+ }
1634
+
1635
+ return {
1636
+ success: true,
1637
+ provider: 'assemblyai',
1638
+ action: 'transcribe',
1639
+ data: {
1640
+ status: 'processing',
1641
+ transcript_id: transcriptId,
1642
+ message: 'Transcription still processing. Use transcript_id to poll manually.',
1643
+ },
1644
+ };
1645
+ },
1646
+ },
1207
1647
  };
1208
1648
 
1209
1649
  // Get available actions for a provider (static handlers only)
package/src/index.ts CHANGED
@@ -1441,23 +1441,23 @@ Docs: https://apiclaw.nordsym.com
1441
1441
 
1442
1442
  try {
1443
1443
  // Check if workspace already exists
1444
- const existing = await convex.query("workspaces:getByEmail" as any, { email }) as { _id: string; status: string; tier: string; usageCount: number; usageLimit: number } | null;
1445
-
1444
+ const existing = await convex.query("workspaces:getByEmail" as any, { email }) as { id: string; status: string; tier: string; usageCount: number; usageLimit: number } | null;
1445
+
1446
1446
  if (existing && existing.status === 'active') {
1447
1447
  // Workspace exists and is active - create session directly
1448
1448
  const fingerprint = getMachineFingerprint();
1449
1449
  const sessionResult = await convex.mutation("workspaces:createAgentSession" as any, {
1450
- workspaceId: existing._id,
1450
+ workspaceId: existing.id,
1451
1451
  fingerprint,
1452
1452
  }) as { success: boolean; sessionToken?: string };
1453
-
1453
+
1454
1454
  if (sessionResult.success) {
1455
- writeSession(sessionResult.sessionToken!, existing._id, email);
1455
+ writeSession(sessionResult.sessionToken!, existing.id, email);
1456
1456
 
1457
1457
  // Update global context
1458
1458
  workspaceContext = {
1459
1459
  sessionToken: sessionResult.sessionToken!,
1460
- workspaceId: existing._id,
1460
+ workspaceId: existing.id,
1461
1461
  email,
1462
1462
  tier: existing.tier,
1463
1463
  usageRemaining: existing.usageLimit - existing.usageCount,
package/src/proxy.ts CHANGED
@@ -21,4 +21,4 @@ export async function callProxy(provider: string, params: any): Promise<any> {
21
21
  return response.json();
22
22
  }
23
23
 
24
- export const PROXY_PROVIDERS = ["openrouter", "brave_search", "resend", "elevenlabs", "46elks", "twilio", "replicate", "firecrawl", "e2b"];
24
+ export const PROXY_PROVIDERS = ["openrouter", "brave_search", "resend", "elevenlabs", "46elks", "twilio", "replicate", "firecrawl", "e2b", "groq", "deepgram", "serper", "mistral", "cohere", "together_ai", "stability_ai", "assemblyai"];