@discomedia/utils 1.0.25 → 1.0.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/dist/index-frontend.cjs +42 -12
  2. package/dist/index-frontend.cjs.map +1 -1
  3. package/dist/index-frontend.mjs +42 -13
  4. package/dist/index-frontend.mjs.map +1 -1
  5. package/dist/index.cjs +171 -22
  6. package/dist/index.cjs.map +1 -1
  7. package/dist/index.mjs +171 -23
  8. package/dist/index.mjs.map +1 -1
  9. package/dist/package.json +2 -2
  10. package/dist/test.js +250 -63
  11. package/dist/test.js.map +1 -1
  12. package/dist/types/index-frontend.d.ts +1 -1
  13. package/dist/types/index.d.ts +3 -1
  14. package/dist/types/index.d.ts.map +1 -1
  15. package/dist/types/json-tools.d.ts.map +1 -1
  16. package/dist/types/llm-deepseek.d.ts +1 -1
  17. package/dist/types/llm-deepseek.d.ts.map +1 -1
  18. package/dist/types/llm-images.d.ts.map +1 -1
  19. package/dist/types/llm-openai.d.ts +2 -2
  20. package/dist/types/llm-openai.d.ts.map +1 -1
  21. package/dist/types/llm-openrouter.d.ts +28 -0
  22. package/dist/types/llm-openrouter.d.ts.map +1 -0
  23. package/dist/types/misc-utils.d.ts.map +1 -1
  24. package/dist/types/types/llm-types.d.ts +26 -3
  25. package/dist/types/types/llm-types.d.ts.map +1 -1
  26. package/dist/types/types/logging-types.d.ts +1 -1
  27. package/dist/types/types/logging-types.d.ts.map +1 -1
  28. package/dist/types-frontend/index-frontend.d.ts +1 -1
  29. package/dist/types-frontend/index.d.ts +3 -1
  30. package/dist/types-frontend/index.d.ts.map +1 -1
  31. package/dist/types-frontend/json-tools.d.ts.map +1 -1
  32. package/dist/types-frontend/llm-deepseek.d.ts +1 -1
  33. package/dist/types-frontend/llm-deepseek.d.ts.map +1 -1
  34. package/dist/types-frontend/llm-images.d.ts.map +1 -1
  35. package/dist/types-frontend/llm-openai.d.ts +2 -2
  36. package/dist/types-frontend/llm-openai.d.ts.map +1 -1
  37. package/dist/types-frontend/llm-openrouter.d.ts +28 -0
  38. package/dist/types-frontend/llm-openrouter.d.ts.map +1 -0
  39. package/dist/types-frontend/misc-utils.d.ts.map +1 -1
  40. package/dist/types-frontend/types/llm-types.d.ts +26 -3
  41. package/dist/types-frontend/types/llm-types.d.ts.map +1 -1
  42. package/dist/types-frontend/types/logging-types.d.ts +1 -1
  43. package/dist/types-frontend/types/logging-types.d.ts.map +1 -1
  44. package/package.json +2 -2
package/dist/package.json CHANGED
@@ -3,7 +3,7 @@
3
3
  "publishConfig": {
4
4
  "access": "public"
5
5
  },
6
- "version": "1.0.25",
6
+ "version": "1.0.26",
7
7
  "author": "Disco Media",
8
8
  "description": "Utility functions used in Disco Media apps",
9
9
  "always-build-npm": true,
@@ -33,7 +33,7 @@
33
33
  },
34
34
  "dependencies": {
35
35
  "dotenv": "^17.2.1",
36
- "openai": "^5.12.1",
36
+ "openai": "^5.12.2",
37
37
  "p-limit": "^6.2.0",
38
38
  "tslib": "^2.8.1",
39
39
  "ws": "^8.18.3"
package/dist/test.js CHANGED
@@ -1019,8 +1019,28 @@ function dateTimeForGS(date) {
1019
1019
  .replace(/\./g, '/');
1020
1020
  }
1021
1021
 
1022
+ /**
1023
+ * Type guard to check if a model is an OpenRouter model
1024
+ */
1025
+ function isOpenRouterModel(model) {
1026
+ const openRouterModels = [
1027
+ 'openai/gpt-5',
1028
+ 'openai/gpt-5-mini',
1029
+ 'openai/gpt-5-nano',
1030
+ 'openai/gpt-oss-120b',
1031
+ 'z.ai/glm-4.5',
1032
+ 'z.ai/glm-4.5-air',
1033
+ 'google/gemini-2.5-flash',
1034
+ 'google/gemini-2.5-flash-lite',
1035
+ 'deepseek/deepseek-r1-0528',
1036
+ 'deepseek/deepseek-chat-v3-0324',
1037
+ ];
1038
+ return openRouterModels.includes(model);
1039
+ }
1040
+
1022
1041
  var Types = /*#__PURE__*/Object.freeze({
1023
- __proto__: null
1042
+ __proto__: null,
1043
+ isOpenRouterModel: isOpenRouterModel
1024
1044
  });
1025
1045
 
1026
1046
  // Utility function for debug logging
@@ -1112,29 +1132,31 @@ function hideApiKeyFromurl(url) {
1112
1132
  * @returns Structured error details.
1113
1133
  */
1114
1134
  function extractErrorDetails(error, response) {
1115
- if (error.name === 'TypeError' && error.message.includes('fetch')) {
1135
+ const errMsg = error instanceof Error ? error.message : String(error);
1136
+ const errName = error instanceof Error ? error.name : 'Error';
1137
+ if (errName === 'TypeError' && errMsg.includes('fetch')) {
1116
1138
  return { type: 'NETWORK_ERROR', reason: 'Network connectivity issue', status: null };
1117
1139
  }
1118
- if (error.message.includes('HTTP error: 429')) {
1119
- const match = error.message.match(/RATE_LIMIT: 429:(\d+)/);
1140
+ if (errMsg.includes('HTTP error: 429')) {
1141
+ const match = errMsg.match(/RATE_LIMIT: 429:(\d+)/);
1120
1142
  const retryAfter = match ? parseInt(match[1]) : undefined;
1121
1143
  return { type: 'RATE_LIMIT', reason: 'Rate limit exceeded', status: 429, retryAfter };
1122
1144
  }
1123
- if (error.message.includes('HTTP error: 401') || error.message.includes('AUTH_ERROR: 401')) {
1145
+ if (errMsg.includes('HTTP error: 401') || errMsg.includes('AUTH_ERROR: 401')) {
1124
1146
  return { type: 'AUTH_ERROR', reason: 'Authentication failed - invalid API key', status: 401 };
1125
1147
  }
1126
- if (error.message.includes('HTTP error: 403') || error.message.includes('AUTH_ERROR: 403')) {
1148
+ if (errMsg.includes('HTTP error: 403') || errMsg.includes('AUTH_ERROR: 403')) {
1127
1149
  return { type: 'AUTH_ERROR', reason: 'Access forbidden - insufficient permissions', status: 403 };
1128
1150
  }
1129
- if (error.message.includes('SERVER_ERROR:')) {
1130
- const status = parseInt(error.message.split('SERVER_ERROR: ')[1]) || 500;
1151
+ if (errMsg.includes('SERVER_ERROR:')) {
1152
+ const status = parseInt(errMsg.split('SERVER_ERROR: ')[1]) || 500;
1131
1153
  return { type: 'SERVER_ERROR', reason: `Server error (${status})`, status };
1132
1154
  }
1133
- if (error.message.includes('CLIENT_ERROR:')) {
1134
- const status = parseInt(error.message.split('CLIENT_ERROR: ')[1]) || 400;
1155
+ if (errMsg.includes('CLIENT_ERROR:')) {
1156
+ const status = parseInt(errMsg.split('CLIENT_ERROR: ')[1]) || 400;
1135
1157
  return { type: 'CLIENT_ERROR', reason: `Client error (${status})`, status };
1136
1158
  }
1137
- return { type: 'UNKNOWN', reason: error.message || 'Unknown error', status: null };
1159
+ return { type: 'UNKNOWN', reason: errMsg || 'Unknown error', status: null };
1138
1160
  }
1139
1161
  /**
1140
1162
  * Fetches a resource with intelligent retry logic for handling transient errors.
@@ -2368,7 +2390,7 @@ const safeJSON = (text) => {
2368
2390
  // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2369
2391
  const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
2370
2392
 
2371
- const VERSION = '5.12.1'; // x-release-please-version
2393
+ const VERSION = '5.12.2'; // x-release-please-version
2372
2394
 
2373
2395
  // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2374
2396
  const isRunningInBrowser = () => {
@@ -9163,7 +9185,8 @@ function fixBrokenJson(jsonStr) {
9163
9185
  return parse();
9164
9186
  }
9165
9187
  catch (error) {
9166
- console.error(`Error parsing JSON at position ${index}: ${error.message}`);
9188
+ const msg = error instanceof Error ? error.message : String(error);
9189
+ console.error(`Error parsing JSON at position ${index}: ${msg}`);
9167
9190
  return null;
9168
9191
  }
9169
9192
  }
@@ -9707,7 +9730,13 @@ async function makeImagesCall(prompt, options = {}) {
9707
9730
  const enhancedResponse = {
9708
9731
  ...response,
9709
9732
  usage: {
9710
- ...response.usage,
9733
+ // OpenAI Images response may not include usage details per image; preserve if present
9734
+ ...(response.usage ?? {
9735
+ input_tokens: 0,
9736
+ input_tokens_details: { image_tokens: 0, text_tokens: 0 },
9737
+ output_tokens: 0,
9738
+ total_tokens: 0,
9739
+ }),
9711
9740
  provider: 'openai',
9712
9741
  model: 'gpt-image-1',
9713
9742
  cost,
@@ -9716,7 +9745,8 @@ async function makeImagesCall(prompt, options = {}) {
9716
9745
  return enhancedResponse;
9717
9746
  }
9718
9747
  catch (error) {
9719
- throw new Error(`OpenAI Images API call failed: ${error.message}`);
9748
+ const message = error instanceof Error ? error.message : 'Unknown error';
9749
+ throw new Error(`OpenAI Images API call failed: ${message}`);
9720
9750
  }
9721
9751
  }
9722
9752
 
@@ -9941,14 +9971,15 @@ const makeDeepseekCall = async (content, responseFormat = 'json', options = {})
9941
9971
  const completion = await createDeepseekCompletion(content, responseFormat, mergedOptions);
9942
9972
  // Handle tool calls similarly to OpenAI
9943
9973
  if (completion.tool_calls && completion.tool_calls.length > 0) {
9974
+ const fnCalls = completion.tool_calls
9975
+ .filter((tc) => tc.type === 'function')
9976
+ .map((tc) => ({
9977
+ id: tc.id,
9978
+ name: tc.function.name,
9979
+ arguments: JSON.parse(tc.function.arguments),
9980
+ }));
9944
9981
  return {
9945
- response: {
9946
- tool_calls: completion.tool_calls.map((tc) => ({
9947
- id: tc.id,
9948
- name: tc.function.name,
9949
- arguments: JSON.parse(tc.function.arguments),
9950
- })),
9951
- },
9982
+ response: { tool_calls: fnCalls },
9952
9983
  usage: {
9953
9984
  prompt_tokens: completion.usage.prompt_tokens,
9954
9985
  completion_tokens: completion.usage.completion_tokens,
@@ -10003,6 +10034,122 @@ const makeDeepseekCall = async (content, responseFormat = 'json', options = {})
10003
10034
  }
10004
10035
  };
10005
10036
 
10037
+ // llm-openrouter.ts
10038
+ // Map our ContextMessage to OpenAI chat message
10039
+ function mapContextToMessages(context) {
10040
+ return context.map((msg) => {
10041
+ const role = msg.role === 'developer' ? 'system' : msg.role;
10042
+ return { role, content: msg.content };
10043
+ });
10044
+ }
10045
+ function toOpenRouterModel(model) {
10046
+ if (model && model.includes('/'))
10047
+ return model;
10048
+ const base = normalizeModelName(model || DEFAULT_MODEL);
10049
+ return `openai/${base}`;
10050
+ }
10051
+ // Normalize model name for pricing
10052
+ function normalizeModelForPricing(model) {
10053
+ if (!model)
10054
+ return { provider: 'openai', coreModel: normalizeModelName(DEFAULT_MODEL) };
10055
+ const [maybeProvider, maybeModel] = model.includes('/') ? model.split('/') : ['openai', model];
10056
+ const provider = (maybeProvider === 'deepseek' ? 'deepseek' : 'openai');
10057
+ const coreModel = normalizeModelName(maybeModel || model);
10058
+ return { provider, coreModel };
10059
+ }
10060
+ /**
10061
+ * Make a call through OpenRouter using the OpenAI Chat Completions-compatible API.
10062
+ * Supports: JSON mode, model selection, message history, and tools.
10063
+ */
10064
+ async function makeOpenRouterCall(input, options = {}) {
10065
+ const { apiKey = process.env.OPENROUTER_API_KEY, model, responseFormat = 'text', tools, toolChoice, context, developerPrompt, temperature = 0.2, max_tokens, top_p, frequency_penalty, presence_penalty, stop, seed, referer = process.env.OPENROUTER_SITE_URL, title = process.env.OPENROUTER_SITE_NAME, } = options;
10066
+ if (!apiKey) {
10067
+ throw new Error('OpenRouter API key is not provided and OPENROUTER_API_KEY is not set');
10068
+ }
10069
+ const client = new OpenAI({
10070
+ apiKey,
10071
+ baseURL: 'https://openrouter.ai/api/v1',
10072
+ defaultHeaders: {
10073
+ ...(referer ? { 'HTTP-Referer': referer } : {}),
10074
+ ...(title ? { 'X-Title': title } : {}),
10075
+ },
10076
+ });
10077
+ const messages = [];
10078
+ if (developerPrompt && developerPrompt.trim()) {
10079
+ messages.push({ role: 'system', content: developerPrompt });
10080
+ }
10081
+ if (context && context.length > 0) {
10082
+ messages.push(...mapContextToMessages(context));
10083
+ }
10084
+ messages.push({ role: 'user', content: input });
10085
+ // Configure response_format
10086
+ let response_format;
10087
+ let parsingFormat = 'text';
10088
+ if (responseFormat === 'json') {
10089
+ response_format = { type: 'json_object' };
10090
+ parsingFormat = 'json';
10091
+ }
10092
+ else if (typeof responseFormat === 'object') {
10093
+ response_format = { type: 'json_object' };
10094
+ parsingFormat = responseFormat;
10095
+ }
10096
+ const modelId = toOpenRouterModel(model);
10097
+ const completion = await client.chat.completions.create({
10098
+ model: modelId,
10099
+ messages,
10100
+ response_format,
10101
+ tools,
10102
+ tool_choice: toolChoice,
10103
+ temperature,
10104
+ max_tokens,
10105
+ top_p,
10106
+ frequency_penalty,
10107
+ presence_penalty,
10108
+ stop,
10109
+ seed,
10110
+ });
10111
+ const choice = completion.choices && completion.choices.length > 0 ? completion.choices[0] : undefined;
10112
+ const message = (choice && 'message' in choice ? choice.message : undefined);
10113
+ const { provider: pricingProvider, coreModel } = normalizeModelForPricing(modelId);
10114
+ const promptTokens = completion.usage?.prompt_tokens ?? 0;
10115
+ const completionTokens = completion.usage?.completion_tokens ?? 0;
10116
+ const cost = calculateCost(pricingProvider, coreModel, promptTokens, completionTokens);
10117
+ // Tool calls branch: return empty string response and expose tool_calls on LLMResponse
10118
+ const hasToolCalls = Array.isArray(message?.tool_calls) && message.tool_calls.length > 0;
10119
+ if (hasToolCalls) {
10120
+ const usageModel = isOpenRouterModel(modelId) ? modelId : DEFAULT_MODEL;
10121
+ return {
10122
+ response: '',
10123
+ usage: {
10124
+ prompt_tokens: promptTokens,
10125
+ completion_tokens: completionTokens,
10126
+ provider: 'openrouter',
10127
+ model: usageModel,
10128
+ cost,
10129
+ },
10130
+ tool_calls: message.tool_calls,
10131
+ };
10132
+ }
10133
+ const rawText = typeof message?.content === 'string' ? message.content : '';
10134
+ const parsed = await parseResponse(rawText, parsingFormat);
10135
+ if (parsed === null) {
10136
+ throw new Error('Failed to parse OpenRouter response');
10137
+ }
10138
+ // Ensure the model value conforms to LLMModel; otherwise fall back to DEFAULT_MODEL
10139
+ const usageModel = isOpenRouterModel(modelId) ? modelId : DEFAULT_MODEL;
10140
+ return {
10141
+ response: parsed,
10142
+ usage: {
10143
+ prompt_tokens: promptTokens,
10144
+ completion_tokens: completionTokens,
10145
+ provider: 'openrouter',
10146
+ model: usageModel,
10147
+ cost,
10148
+ },
10149
+ ...(hasToolCalls ? { tool_calls: message.tool_calls } : {}),
10150
+ };
10151
+ }
10152
+
10006
10153
  /**
10007
10154
  * A class to measure performance of code execution.
10008
10155
  *
@@ -18312,6 +18459,7 @@ const disco = {
18312
18459
  call: makeLLMCall,
18313
18460
  seek: makeDeepseekCall,
18314
18461
  images: makeImagesCall,
18462
+ open: makeOpenRouterCall,
18315
18463
  },
18316
18464
  polygon: {
18317
18465
  fetchTickerInfo: fetchTickerInfo,
@@ -18358,46 +18506,6 @@ const disco = {
18358
18506
  };
18359
18507
 
18360
18508
  // Test file for context functionality
18361
- async function testLLM() {
18362
- //const models: LLMModel[] = ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
18363
- const models = ['gpt-5-nano'];
18364
- for (const model of models) {
18365
- console.log(`\nTesting model: ${model}`);
18366
- // // 1. Basic call
18367
- // try {
18368
- // const basic = await disco.llm.call('What is the capital of France?', { model });
18369
- // if (!basic || !basic.response) {
18370
- // throw new Error('No response from LLM');
18371
- // }
18372
- // console.log(`Response: ${basic.response}`);
18373
- // } catch (e) {
18374
- // console.error(` Basic call error:`, e);
18375
- // }
18376
- // // 2. JSON call
18377
- // try {
18378
- // const jsonPrompt = 'Return a JSON object with keys country and capital for France.';
18379
- // const json = await disco.llm.call(jsonPrompt, { model, responseFormat: 'json' });
18380
- // if (!json || !json.response) {
18381
- // throw new Error('No response from LLM');
18382
- // }
18383
- // console.log
18384
- // } catch (e) {
18385
- // console.error(` JSON call error:`, e);
18386
- // }
18387
- // 3. Web search
18388
- try {
18389
- const searchPrompt = 'What is the latest news about artificial intelligence? Respond with 3 sentences max.';
18390
- const tool = await disco.llm.call(searchPrompt, { model, useWebSearch: true });
18391
- if (!tool || !tool.response) {
18392
- throw new Error('No response from LLM');
18393
- }
18394
- console.log(`Response: ${tool.response}`);
18395
- }
18396
- catch (e) {
18397
- console.error(` Web search error:`, e);
18398
- }
18399
- }
18400
- }
18401
18509
  // testGetTradingDate();
18402
18510
  // testGetTradingStartAndEndDates();
18403
18511
  // testGetLastFullTradingDate();
@@ -18406,5 +18514,84 @@ async function testLLM() {
18406
18514
  // testGetNextMarketDay();
18407
18515
  // testCountTradingDays();
18408
18516
  // testGetPreviousMarketDay();
18409
- testLLM();
18517
+ // OpenRouter tests (requires OPENROUTER_API_KEY)
18518
+ async function testOpenRouter() {
18519
+ if (!process.env.OPENROUTER_API_KEY) {
18520
+ console.log('Skipping OpenRouter tests: OPENROUTER_API_KEY not set');
18521
+ return;
18522
+ }
18523
+ const models = [
18524
+ 'openai/gpt-5-nano', // inexpensive, tool-capable
18525
+ 'google/gemini-2.5-flash-lite',
18526
+ 'deepseek/deepseek-chat-v3-0324',
18527
+ ];
18528
+ for (const model of models) {
18529
+ console.log(`\n--- Testing OpenRouter model: ${model} ---`);
18530
+ // 1) JSON mode
18531
+ try {
18532
+ const prompt = 'Return a JSON object with keys country and capital for France.';
18533
+ const jsonRes = await disco.llm.open(prompt, { model, responseFormat: 'json' });
18534
+ console.log('\n[OpenRouter] JSON mode test');
18535
+ console.log(' Response:', jsonRes.response);
18536
+ console.log(' Usage:', jsonRes.usage);
18537
+ }
18538
+ catch (e) {
18539
+ console.error(' OpenRouter JSON mode error:', e);
18540
+ }
18541
+ // 2) Model history (context)
18542
+ try {
18543
+ const context = [
18544
+ { role: 'user', content: 'What is the capital of Germany?' },
18545
+ { role: 'assistant', content: 'Berlin.' },
18546
+ ];
18547
+ const followup = 'Remind me what you said earlier in one word.';
18548
+ const histRes = await disco.llm.open(followup, { model, context });
18549
+ console.log('\n[OpenRouter] History test');
18550
+ console.log(' Response:', histRes.response);
18551
+ }
18552
+ catch (e) {
18553
+ console.error(' OpenRouter history error:', e);
18554
+ }
18555
+ // 3) Tool calling
18556
+ try {
18557
+ const tools = [
18558
+ {
18559
+ type: 'function',
18560
+ function: {
18561
+ name: 'add',
18562
+ description: 'Add two integers and return the sum',
18563
+ parameters: {
18564
+ type: 'object',
18565
+ properties: {
18566
+ a: { type: 'integer' },
18567
+ b: { type: 'integer' },
18568
+ },
18569
+ required: ['a', 'b'],
18570
+ },
18571
+ },
18572
+ },
18573
+ ];
18574
+ const toolPrompt = 'Please add 2 and 3. If tools are available, call the add function.';
18575
+ const toolRes = await disco.llm.open(toolPrompt, { model, tools, toolChoice: 'auto' });
18576
+ console.log('\n[OpenRouter] Tools test');
18577
+ if (toolRes.tool_calls && toolRes.tool_calls.length > 0) {
18578
+ const summarized = toolRes.tool_calls
18579
+ .filter((tc) => tc.type === 'function')
18580
+ .map((tc) => ({
18581
+ id: tc.id,
18582
+ name: tc.function.name,
18583
+ arguments: tc.function.arguments,
18584
+ }));
18585
+ console.log(' Tool calls:', summarized);
18586
+ }
18587
+ else {
18588
+ console.log(' No tool calls. Model replied:', toolRes.response);
18589
+ }
18590
+ }
18591
+ catch (e) {
18592
+ console.error(' OpenRouter tools error:', e);
18593
+ }
18594
+ }
18595
+ }
18596
+ testOpenRouter();
18410
18597
  //# sourceMappingURL=test.js.map