@discomedia/utils 1.0.25 → 1.0.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/dist/index-frontend.cjs +42 -12
  2. package/dist/index-frontend.cjs.map +1 -1
  3. package/dist/index-frontend.mjs +42 -13
  4. package/dist/index-frontend.mjs.map +1 -1
  5. package/dist/index.cjs +171 -22
  6. package/dist/index.cjs.map +1 -1
  7. package/dist/index.mjs +171 -23
  8. package/dist/index.mjs.map +1 -1
  9. package/dist/package.json +2 -2
  10. package/dist/test.js +250 -63
  11. package/dist/test.js.map +1 -1
  12. package/dist/types/index-frontend.d.ts +1 -1
  13. package/dist/types/index.d.ts +3 -1
  14. package/dist/types/index.d.ts.map +1 -1
  15. package/dist/types/json-tools.d.ts.map +1 -1
  16. package/dist/types/llm-deepseek.d.ts +1 -1
  17. package/dist/types/llm-deepseek.d.ts.map +1 -1
  18. package/dist/types/llm-images.d.ts.map +1 -1
  19. package/dist/types/llm-openai.d.ts +2 -2
  20. package/dist/types/llm-openai.d.ts.map +1 -1
  21. package/dist/types/llm-openrouter.d.ts +28 -0
  22. package/dist/types/llm-openrouter.d.ts.map +1 -0
  23. package/dist/types/misc-utils.d.ts.map +1 -1
  24. package/dist/types/types/llm-types.d.ts +26 -3
  25. package/dist/types/types/llm-types.d.ts.map +1 -1
  26. package/dist/types/types/logging-types.d.ts +1 -1
  27. package/dist/types/types/logging-types.d.ts.map +1 -1
  28. package/dist/types-frontend/index-frontend.d.ts +1 -1
  29. package/dist/types-frontend/index.d.ts +3 -1
  30. package/dist/types-frontend/index.d.ts.map +1 -1
  31. package/dist/types-frontend/json-tools.d.ts.map +1 -1
  32. package/dist/types-frontend/llm-deepseek.d.ts +1 -1
  33. package/dist/types-frontend/llm-deepseek.d.ts.map +1 -1
  34. package/dist/types-frontend/llm-images.d.ts.map +1 -1
  35. package/dist/types-frontend/llm-openai.d.ts +2 -2
  36. package/dist/types-frontend/llm-openai.d.ts.map +1 -1
  37. package/dist/types-frontend/llm-openrouter.d.ts +28 -0
  38. package/dist/types-frontend/llm-openrouter.d.ts.map +1 -0
  39. package/dist/types-frontend/misc-utils.d.ts.map +1 -1
  40. package/dist/types-frontend/types/llm-types.d.ts +26 -3
  41. package/dist/types-frontend/types/llm-types.d.ts.map +1 -1
  42. package/dist/types-frontend/types/logging-types.d.ts +1 -1
  43. package/dist/types-frontend/types/logging-types.d.ts.map +1 -1
  44. package/package.json +2 -2
package/dist/index.cjs CHANGED
@@ -1021,8 +1021,28 @@ function dateTimeForGS(date) {
1021
1021
  .replace(/\./g, '/');
1022
1022
  }
1023
1023
 
1024
+ /**
1025
+ * Type guard to check if a model is an OpenRouter model
1026
+ */
1027
+ function isOpenRouterModel(model) {
1028
+ const openRouterModels = [
1029
+ 'openai/gpt-5',
1030
+ 'openai/gpt-5-mini',
1031
+ 'openai/gpt-5-nano',
1032
+ 'openai/gpt-oss-120b',
1033
+ 'z.ai/glm-4.5',
1034
+ 'z.ai/glm-4.5-air',
1035
+ 'google/gemini-2.5-flash',
1036
+ 'google/gemini-2.5-flash-lite',
1037
+ 'deepseek/deepseek-r1-0528',
1038
+ 'deepseek/deepseek-chat-v3-0324',
1039
+ ];
1040
+ return openRouterModels.includes(model);
1041
+ }
1042
+
1024
1043
  var Types = /*#__PURE__*/Object.freeze({
1025
- __proto__: null
1044
+ __proto__: null,
1045
+ isOpenRouterModel: isOpenRouterModel
1026
1046
  });
1027
1047
 
1028
1048
  // Utility function for debug logging
@@ -1114,29 +1134,31 @@ function hideApiKeyFromurl(url) {
1114
1134
  * @returns Structured error details.
1115
1135
  */
1116
1136
  function extractErrorDetails(error, response) {
1117
- if (error.name === 'TypeError' && error.message.includes('fetch')) {
1137
+ const errMsg = error instanceof Error ? error.message : String(error);
1138
+ const errName = error instanceof Error ? error.name : 'Error';
1139
+ if (errName === 'TypeError' && errMsg.includes('fetch')) {
1118
1140
  return { type: 'NETWORK_ERROR', reason: 'Network connectivity issue', status: null };
1119
1141
  }
1120
- if (error.message.includes('HTTP error: 429')) {
1121
- const match = error.message.match(/RATE_LIMIT: 429:(\d+)/);
1142
+ if (errMsg.includes('HTTP error: 429')) {
1143
+ const match = errMsg.match(/RATE_LIMIT: 429:(\d+)/);
1122
1144
  const retryAfter = match ? parseInt(match[1]) : undefined;
1123
1145
  return { type: 'RATE_LIMIT', reason: 'Rate limit exceeded', status: 429, retryAfter };
1124
1146
  }
1125
- if (error.message.includes('HTTP error: 401') || error.message.includes('AUTH_ERROR: 401')) {
1147
+ if (errMsg.includes('HTTP error: 401') || errMsg.includes('AUTH_ERROR: 401')) {
1126
1148
  return { type: 'AUTH_ERROR', reason: 'Authentication failed - invalid API key', status: 401 };
1127
1149
  }
1128
- if (error.message.includes('HTTP error: 403') || error.message.includes('AUTH_ERROR: 403')) {
1150
+ if (errMsg.includes('HTTP error: 403') || errMsg.includes('AUTH_ERROR: 403')) {
1129
1151
  return { type: 'AUTH_ERROR', reason: 'Access forbidden - insufficient permissions', status: 403 };
1130
1152
  }
1131
- if (error.message.includes('SERVER_ERROR:')) {
1132
- const status = parseInt(error.message.split('SERVER_ERROR: ')[1]) || 500;
1153
+ if (errMsg.includes('SERVER_ERROR:')) {
1154
+ const status = parseInt(errMsg.split('SERVER_ERROR: ')[1]) || 500;
1133
1155
  return { type: 'SERVER_ERROR', reason: `Server error (${status})`, status };
1134
1156
  }
1135
- if (error.message.includes('CLIENT_ERROR:')) {
1136
- const status = parseInt(error.message.split('CLIENT_ERROR: ')[1]) || 400;
1157
+ if (errMsg.includes('CLIENT_ERROR:')) {
1158
+ const status = parseInt(errMsg.split('CLIENT_ERROR: ')[1]) || 400;
1137
1159
  return { type: 'CLIENT_ERROR', reason: `Client error (${status})`, status };
1138
1160
  }
1139
- return { type: 'UNKNOWN', reason: error.message || 'Unknown error', status: null };
1161
+ return { type: 'UNKNOWN', reason: errMsg || 'Unknown error', status: null };
1140
1162
  }
1141
1163
  /**
1142
1164
  * Fetches a resource with intelligent retry logic for handling transient errors.
@@ -2370,7 +2392,7 @@ const safeJSON = (text) => {
2370
2392
  // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2371
2393
  const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
2372
2394
 
2373
- const VERSION = '5.12.1'; // x-release-please-version
2395
+ const VERSION = '5.12.2'; // x-release-please-version
2374
2396
 
2375
2397
  // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2376
2398
  const isRunningInBrowser = () => {
@@ -9165,7 +9187,8 @@ function fixBrokenJson(jsonStr) {
9165
9187
  return parse();
9166
9188
  }
9167
9189
  catch (error) {
9168
- console.error(`Error parsing JSON at position ${index}: ${error.message}`);
9190
+ const msg = error instanceof Error ? error.message : String(error);
9191
+ console.error(`Error parsing JSON at position ${index}: ${msg}`);
9169
9192
  return null;
9170
9193
  }
9171
9194
  }
@@ -9709,7 +9732,13 @@ async function makeImagesCall(prompt, options = {}) {
9709
9732
  const enhancedResponse = {
9710
9733
  ...response,
9711
9734
  usage: {
9712
- ...response.usage,
9735
+ // OpenAI Images response may not include usage details per image; preserve if present
9736
+ ...(response.usage ?? {
9737
+ input_tokens: 0,
9738
+ input_tokens_details: { image_tokens: 0, text_tokens: 0 },
9739
+ output_tokens: 0,
9740
+ total_tokens: 0,
9741
+ }),
9713
9742
  provider: 'openai',
9714
9743
  model: 'gpt-image-1',
9715
9744
  cost,
@@ -9718,7 +9747,8 @@ async function makeImagesCall(prompt, options = {}) {
9718
9747
  return enhancedResponse;
9719
9748
  }
9720
9749
  catch (error) {
9721
- throw new Error(`OpenAI Images API call failed: ${error.message}`);
9750
+ const message = error instanceof Error ? error.message : 'Unknown error';
9751
+ throw new Error(`OpenAI Images API call failed: ${message}`);
9722
9752
  }
9723
9753
  }
9724
9754
 
@@ -9943,14 +9973,15 @@ const makeDeepseekCall = async (content, responseFormat = 'json', options = {})
9943
9973
  const completion = await createDeepseekCompletion(content, responseFormat, mergedOptions);
9944
9974
  // Handle tool calls similarly to OpenAI
9945
9975
  if (completion.tool_calls && completion.tool_calls.length > 0) {
9976
+ const fnCalls = completion.tool_calls
9977
+ .filter((tc) => tc.type === 'function')
9978
+ .map((tc) => ({
9979
+ id: tc.id,
9980
+ name: tc.function.name,
9981
+ arguments: JSON.parse(tc.function.arguments),
9982
+ }));
9946
9983
  return {
9947
- response: {
9948
- tool_calls: completion.tool_calls.map((tc) => ({
9949
- id: tc.id,
9950
- name: tc.function.name,
9951
- arguments: JSON.parse(tc.function.arguments),
9952
- })),
9953
- },
9984
+ response: { tool_calls: fnCalls },
9954
9985
  usage: {
9955
9986
  prompt_tokens: completion.usage.prompt_tokens,
9956
9987
  completion_tokens: completion.usage.completion_tokens,
@@ -10005,6 +10036,122 @@ const makeDeepseekCall = async (content, responseFormat = 'json', options = {})
10005
10036
  }
10006
10037
  };
10007
10038
 
10039
+ // llm-openrouter.ts
10040
+ // Map our ContextMessage to OpenAI chat message
10041
+ function mapContextToMessages(context) {
10042
+ return context.map((msg) => {
10043
+ const role = msg.role === 'developer' ? 'system' : msg.role;
10044
+ return { role, content: msg.content };
10045
+ });
10046
+ }
10047
+ function toOpenRouterModel(model) {
10048
+ if (model && model.includes('/'))
10049
+ return model;
10050
+ const base = normalizeModelName(model || DEFAULT_MODEL);
10051
+ return `openai/${base}`;
10052
+ }
10053
+ // Normalize model name for pricing
10054
+ function normalizeModelForPricing(model) {
10055
+ if (!model)
10056
+ return { provider: 'openai', coreModel: normalizeModelName(DEFAULT_MODEL) };
10057
+ const [maybeProvider, maybeModel] = model.includes('/') ? model.split('/') : ['openai', model];
10058
+ const provider = (maybeProvider === 'deepseek' ? 'deepseek' : 'openai');
10059
+ const coreModel = normalizeModelName(maybeModel || model);
10060
+ return { provider, coreModel };
10061
+ }
10062
+ /**
10063
+ * Make a call through OpenRouter using the OpenAI Chat Completions-compatible API.
10064
+ * Supports: JSON mode, model selection, message history, and tools.
10065
+ */
10066
+ async function makeOpenRouterCall(input, options = {}) {
10067
+ const { apiKey = process.env.OPENROUTER_API_KEY, model, responseFormat = 'text', tools, toolChoice, context, developerPrompt, temperature = 0.2, max_tokens, top_p, frequency_penalty, presence_penalty, stop, seed, referer = process.env.OPENROUTER_SITE_URL, title = process.env.OPENROUTER_SITE_NAME, } = options;
10068
+ if (!apiKey) {
10069
+ throw new Error('OpenRouter API key is not provided and OPENROUTER_API_KEY is not set');
10070
+ }
10071
+ const client = new OpenAI({
10072
+ apiKey,
10073
+ baseURL: 'https://openrouter.ai/api/v1',
10074
+ defaultHeaders: {
10075
+ ...(referer ? { 'HTTP-Referer': referer } : {}),
10076
+ ...(title ? { 'X-Title': title } : {}),
10077
+ },
10078
+ });
10079
+ const messages = [];
10080
+ if (developerPrompt && developerPrompt.trim()) {
10081
+ messages.push({ role: 'system', content: developerPrompt });
10082
+ }
10083
+ if (context && context.length > 0) {
10084
+ messages.push(...mapContextToMessages(context));
10085
+ }
10086
+ messages.push({ role: 'user', content: input });
10087
+ // Configure response_format
10088
+ let response_format;
10089
+ let parsingFormat = 'text';
10090
+ if (responseFormat === 'json') {
10091
+ response_format = { type: 'json_object' };
10092
+ parsingFormat = 'json';
10093
+ }
10094
+ else if (typeof responseFormat === 'object') {
10095
+ response_format = { type: 'json_object' };
10096
+ parsingFormat = responseFormat;
10097
+ }
10098
+ const modelId = toOpenRouterModel(model);
10099
+ const completion = await client.chat.completions.create({
10100
+ model: modelId,
10101
+ messages,
10102
+ response_format,
10103
+ tools,
10104
+ tool_choice: toolChoice,
10105
+ temperature,
10106
+ max_tokens,
10107
+ top_p,
10108
+ frequency_penalty,
10109
+ presence_penalty,
10110
+ stop,
10111
+ seed,
10112
+ });
10113
+ const choice = completion.choices && completion.choices.length > 0 ? completion.choices[0] : undefined;
10114
+ const message = (choice && 'message' in choice ? choice.message : undefined);
10115
+ const { provider: pricingProvider, coreModel } = normalizeModelForPricing(modelId);
10116
+ const promptTokens = completion.usage?.prompt_tokens ?? 0;
10117
+ const completionTokens = completion.usage?.completion_tokens ?? 0;
10118
+ const cost = calculateCost(pricingProvider, coreModel, promptTokens, completionTokens);
10119
+ // Tool calls branch: return empty string response and expose tool_calls on LLMResponse
10120
+ const hasToolCalls = Array.isArray(message?.tool_calls) && message.tool_calls.length > 0;
10121
+ if (hasToolCalls) {
10122
+ const usageModel = isOpenRouterModel(modelId) ? modelId : DEFAULT_MODEL;
10123
+ return {
10124
+ response: '',
10125
+ usage: {
10126
+ prompt_tokens: promptTokens,
10127
+ completion_tokens: completionTokens,
10128
+ provider: 'openrouter',
10129
+ model: usageModel,
10130
+ cost,
10131
+ },
10132
+ tool_calls: message.tool_calls,
10133
+ };
10134
+ }
10135
+ const rawText = typeof message?.content === 'string' ? message.content : '';
10136
+ const parsed = await parseResponse(rawText, parsingFormat);
10137
+ if (parsed === null) {
10138
+ throw new Error('Failed to parse OpenRouter response');
10139
+ }
10140
+ // Ensure the model value conforms to LLMModel; otherwise fall back to DEFAULT_MODEL
10141
+ const usageModel = isOpenRouterModel(modelId) ? modelId : DEFAULT_MODEL;
10142
+ return {
10143
+ response: parsed,
10144
+ usage: {
10145
+ prompt_tokens: promptTokens,
10146
+ completion_tokens: completionTokens,
10147
+ provider: 'openrouter',
10148
+ model: usageModel,
10149
+ cost,
10150
+ },
10151
+ ...(hasToolCalls ? { tool_calls: message.tool_calls } : {}),
10152
+ };
10153
+ }
10154
+
10008
10155
  /**
10009
10156
  * A class to measure performance of code execution.
10010
10157
  *
@@ -18314,6 +18461,7 @@ const disco = {
18314
18461
  call: makeLLMCall,
18315
18462
  seek: makeDeepseekCall,
18316
18463
  images: makeImagesCall,
18464
+ open: makeOpenRouterCall,
18317
18465
  },
18318
18466
  polygon: {
18319
18467
  fetchTickerInfo: fetchTickerInfo,
@@ -18362,4 +18510,5 @@ const disco = {
18362
18510
  exports.AlpacaMarketDataAPI = AlpacaMarketDataAPI;
18363
18511
  exports.AlpacaTradingAPI = AlpacaTradingAPI;
18364
18512
  exports.disco = disco;
18513
+ exports.isOpenRouterModel = isOpenRouterModel;
18365
18514
  //# sourceMappingURL=index.cjs.map