@discomedia/utils 1.0.25 → 1.0.27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/dist/alpaca-trading-api-6NxNgQBn.js +1413 -0
  2. package/dist/alpaca-trading-api-6NxNgQBn.js.map +1 -0
  3. package/dist/index-frontend.cjs +105 -12
  4. package/dist/index-frontend.cjs.map +1 -1
  5. package/dist/index-frontend.mjs +105 -13
  6. package/dist/index-frontend.mjs.map +1 -1
  7. package/dist/index.cjs +257 -43
  8. package/dist/index.cjs.map +1 -1
  9. package/dist/index.mjs +257 -44
  10. package/dist/index.mjs.map +1 -1
  11. package/dist/package.json +3 -3
  12. package/dist/test.js +868 -5517
  13. package/dist/test.js.map +1 -1
  14. package/dist/types/alpaca-trading-api.d.ts +33 -0
  15. package/dist/types/alpaca-trading-api.d.ts.map +1 -1
  16. package/dist/types/index-frontend.d.ts +1 -1
  17. package/dist/types/index.d.ts +3 -1
  18. package/dist/types/index.d.ts.map +1 -1
  19. package/dist/types/json-tools.d.ts.map +1 -1
  20. package/dist/types/llm-deepseek.d.ts +1 -1
  21. package/dist/types/llm-deepseek.d.ts.map +1 -1
  22. package/dist/types/llm-images.d.ts.map +1 -1
  23. package/dist/types/llm-openai.d.ts +2 -2
  24. package/dist/types/llm-openai.d.ts.map +1 -1
  25. package/dist/types/llm-openrouter.d.ts +28 -0
  26. package/dist/types/llm-openrouter.d.ts.map +1 -0
  27. package/dist/types/misc-utils.d.ts.map +1 -1
  28. package/dist/types/types/llm-types.d.ts +26 -3
  29. package/dist/types/types/llm-types.d.ts.map +1 -1
  30. package/dist/types/types/logging-types.d.ts +1 -1
  31. package/dist/types/types/logging-types.d.ts.map +1 -1
  32. package/dist/types-frontend/alpaca-trading-api.d.ts +33 -0
  33. package/dist/types-frontend/alpaca-trading-api.d.ts.map +1 -1
  34. package/dist/types-frontend/index-frontend.d.ts +1 -1
  35. package/dist/types-frontend/index.d.ts +3 -1
  36. package/dist/types-frontend/index.d.ts.map +1 -1
  37. package/dist/types-frontend/json-tools.d.ts.map +1 -1
  38. package/dist/types-frontend/llm-deepseek.d.ts +1 -1
  39. package/dist/types-frontend/llm-deepseek.d.ts.map +1 -1
  40. package/dist/types-frontend/llm-images.d.ts.map +1 -1
  41. package/dist/types-frontend/llm-openai.d.ts +2 -2
  42. package/dist/types-frontend/llm-openai.d.ts.map +1 -1
  43. package/dist/types-frontend/llm-openrouter.d.ts +28 -0
  44. package/dist/types-frontend/llm-openrouter.d.ts.map +1 -0
  45. package/dist/types-frontend/misc-utils.d.ts.map +1 -1
  46. package/dist/types-frontend/types/llm-types.d.ts +26 -3
  47. package/dist/types-frontend/types/llm-types.d.ts.map +1 -1
  48. package/dist/types-frontend/types/logging-types.d.ts +1 -1
  49. package/dist/types-frontend/types/logging-types.d.ts.map +1 -1
  50. package/package.json +3 -3
package/dist/index.cjs CHANGED
@@ -1021,8 +1021,28 @@ function dateTimeForGS(date) {
1021
1021
  .replace(/\./g, '/');
1022
1022
  }
1023
1023
 
1024
+ /**
1025
+ * Type guard to check if a model is an OpenRouter model
1026
+ */
1027
+ function isOpenRouterModel(model) {
1028
+ const openRouterModels = [
1029
+ 'openai/gpt-5',
1030
+ 'openai/gpt-5-mini',
1031
+ 'openai/gpt-5-nano',
1032
+ 'openai/gpt-oss-120b',
1033
+ 'z.ai/glm-4.5',
1034
+ 'z.ai/glm-4.5-air',
1035
+ 'google/gemini-2.5-flash',
1036
+ 'google/gemini-2.5-flash-lite',
1037
+ 'deepseek/deepseek-r1-0528',
1038
+ 'deepseek/deepseek-chat-v3-0324',
1039
+ ];
1040
+ return openRouterModels.includes(model);
1041
+ }
1042
+
1024
1043
  var Types = /*#__PURE__*/Object.freeze({
1025
- __proto__: null
1044
+ __proto__: null,
1045
+ isOpenRouterModel: isOpenRouterModel
1026
1046
  });
1027
1047
 
1028
1048
  // Utility function for debug logging
@@ -1114,29 +1134,31 @@ function hideApiKeyFromurl(url) {
1114
1134
  * @returns Structured error details.
1115
1135
  */
1116
1136
  function extractErrorDetails(error, response) {
1117
- if (error.name === 'TypeError' && error.message.includes('fetch')) {
1137
+ const errMsg = error instanceof Error ? error.message : String(error);
1138
+ const errName = error instanceof Error ? error.name : 'Error';
1139
+ if (errName === 'TypeError' && errMsg.includes('fetch')) {
1118
1140
  return { type: 'NETWORK_ERROR', reason: 'Network connectivity issue', status: null };
1119
1141
  }
1120
- if (error.message.includes('HTTP error: 429')) {
1121
- const match = error.message.match(/RATE_LIMIT: 429:(\d+)/);
1142
+ if (errMsg.includes('HTTP error: 429')) {
1143
+ const match = errMsg.match(/RATE_LIMIT: 429:(\d+)/);
1122
1144
  const retryAfter = match ? parseInt(match[1]) : undefined;
1123
1145
  return { type: 'RATE_LIMIT', reason: 'Rate limit exceeded', status: 429, retryAfter };
1124
1146
  }
1125
- if (error.message.includes('HTTP error: 401') || error.message.includes('AUTH_ERROR: 401')) {
1147
+ if (errMsg.includes('HTTP error: 401') || errMsg.includes('AUTH_ERROR: 401')) {
1126
1148
  return { type: 'AUTH_ERROR', reason: 'Authentication failed - invalid API key', status: 401 };
1127
1149
  }
1128
- if (error.message.includes('HTTP error: 403') || error.message.includes('AUTH_ERROR: 403')) {
1150
+ if (errMsg.includes('HTTP error: 403') || errMsg.includes('AUTH_ERROR: 403')) {
1129
1151
  return { type: 'AUTH_ERROR', reason: 'Access forbidden - insufficient permissions', status: 403 };
1130
1152
  }
1131
- if (error.message.includes('SERVER_ERROR:')) {
1132
- const status = parseInt(error.message.split('SERVER_ERROR: ')[1]) || 500;
1153
+ if (errMsg.includes('SERVER_ERROR:')) {
1154
+ const status = parseInt(errMsg.split('SERVER_ERROR: ')[1]) || 500;
1133
1155
  return { type: 'SERVER_ERROR', reason: `Server error (${status})`, status };
1134
1156
  }
1135
- if (error.message.includes('CLIENT_ERROR:')) {
1136
- const status = parseInt(error.message.split('CLIENT_ERROR: ')[1]) || 400;
1157
+ if (errMsg.includes('CLIENT_ERROR:')) {
1158
+ const status = parseInt(errMsg.split('CLIENT_ERROR: ')[1]) || 400;
1137
1159
  return { type: 'CLIENT_ERROR', reason: `Client error (${status})`, status };
1138
1160
  }
1139
- return { type: 'UNKNOWN', reason: error.message || 'Unknown error', status: null };
1161
+ return { type: 'UNKNOWN', reason: errMsg || 'Unknown error', status: null };
1140
1162
  }
1141
1163
  /**
1142
1164
  * Fetches a resource with intelligent retry logic for handling transient errors.
@@ -1350,51 +1372,47 @@ function pLimit(concurrency) {
1350
1372
  let activeCount = 0;
1351
1373
 
1352
1374
  const resumeNext = () => {
1375
+ // Process the next queued function if we're under the concurrency limit
1353
1376
  if (activeCount < concurrency && queue.size > 0) {
1354
- queue.dequeue()();
1355
- // Since `pendingCount` has been decreased by one, increase `activeCount` by one.
1356
1377
  activeCount++;
1378
+ queue.dequeue()();
1357
1379
  }
1358
1380
  };
1359
1381
 
1360
1382
  const next = () => {
1361
1383
  activeCount--;
1362
-
1363
1384
  resumeNext();
1364
1385
  };
1365
1386
 
1366
1387
  const run = async (function_, resolve, arguments_) => {
1388
+ // Execute the function and capture the result promise
1367
1389
  const result = (async () => function_(...arguments_))();
1368
1390
 
1391
+ // Resolve immediately with the promise (don't wait for completion)
1369
1392
  resolve(result);
1370
1393
 
1394
+ // Wait for the function to complete (success or failure)
1395
+ // We catch errors here to prevent unhandled rejections,
1396
+ // but the original promise rejection is preserved for the caller
1371
1397
  try {
1372
1398
  await result;
1373
1399
  } catch {}
1374
1400
 
1401
+ // Decrement active count and process next queued function
1375
1402
  next();
1376
1403
  };
1377
1404
 
1378
1405
  const enqueue = (function_, resolve, arguments_) => {
1379
- // Queue `internalResolve` instead of the `run` function
1380
- // to preserve asynchronous context.
1381
- new Promise(internalResolve => {
1406
+ // Queue the internal resolve function instead of the run function
1407
+ // to preserve the asynchronous execution context.
1408
+ new Promise(internalResolve => { // eslint-disable-line promise/param-names
1382
1409
  queue.enqueue(internalResolve);
1383
- }).then(
1384
- run.bind(undefined, function_, resolve, arguments_),
1385
- );
1386
-
1387
- (async () => {
1388
- // This function needs to wait until the next microtask before comparing
1389
- // `activeCount` to `concurrency`, because `activeCount` is updated asynchronously
1390
- // after the `internalResolve` function is dequeued and called. The comparison in the if-statement
1391
- // needs to happen asynchronously as well to get an up-to-date value for `activeCount`.
1392
- await Promise.resolve();
1393
-
1394
- if (activeCount < concurrency) {
1395
- resumeNext();
1396
- }
1397
- })();
1410
+ }).then(run.bind(undefined, function_, resolve, arguments_)); // eslint-disable-line promise/prefer-await-to-then
1411
+
1412
+ // Start processing immediately if we haven't reached the concurrency limit
1413
+ if (activeCount < concurrency) {
1414
+ resumeNext();
1415
+ }
1398
1416
  };
1399
1417
 
1400
1418
  const generator = (function_, ...arguments_) => new Promise(resolve => {
@@ -1428,6 +1446,12 @@ function pLimit(concurrency) {
1428
1446
  });
1429
1447
  },
1430
1448
  },
1449
+ map: {
1450
+ async value(array, function_) {
1451
+ const promises = array.map(value => this(function_, value));
1452
+ return Promise.all(promises);
1453
+ },
1454
+ },
1431
1455
  });
1432
1456
 
1433
1457
  return generator;
@@ -2370,7 +2394,7 @@ const safeJSON = (text) => {
2370
2394
  // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2371
2395
  const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
2372
2396
 
2373
- const VERSION = '5.12.1'; // x-release-please-version
2397
+ const VERSION = '5.12.2'; // x-release-please-version
2374
2398
 
2375
2399
  // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2376
2400
  const isRunningInBrowser = () => {
@@ -9165,7 +9189,8 @@ function fixBrokenJson(jsonStr) {
9165
9189
  return parse();
9166
9190
  }
9167
9191
  catch (error) {
9168
- console.error(`Error parsing JSON at position ${index}: ${error.message}`);
9192
+ const msg = error instanceof Error ? error.message : String(error);
9193
+ console.error(`Error parsing JSON at position ${index}: ${msg}`);
9169
9194
  return null;
9170
9195
  }
9171
9196
  }
@@ -9709,7 +9734,13 @@ async function makeImagesCall(prompt, options = {}) {
9709
9734
  const enhancedResponse = {
9710
9735
  ...response,
9711
9736
  usage: {
9712
- ...response.usage,
9737
+ // OpenAI Images response may not include usage details per image; preserve if present
9738
+ ...(response.usage ?? {
9739
+ input_tokens: 0,
9740
+ input_tokens_details: { image_tokens: 0, text_tokens: 0 },
9741
+ output_tokens: 0,
9742
+ total_tokens: 0,
9743
+ }),
9713
9744
  provider: 'openai',
9714
9745
  model: 'gpt-image-1',
9715
9746
  cost,
@@ -9718,7 +9749,8 @@ async function makeImagesCall(prompt, options = {}) {
9718
9749
  return enhancedResponse;
9719
9750
  }
9720
9751
  catch (error) {
9721
- throw new Error(`OpenAI Images API call failed: ${error.message}`);
9752
+ const message = error instanceof Error ? error.message : 'Unknown error';
9753
+ throw new Error(`OpenAI Images API call failed: ${message}`);
9722
9754
  }
9723
9755
  }
9724
9756
 
@@ -9943,14 +9975,15 @@ const makeDeepseekCall = async (content, responseFormat = 'json', options = {})
9943
9975
  const completion = await createDeepseekCompletion(content, responseFormat, mergedOptions);
9944
9976
  // Handle tool calls similarly to OpenAI
9945
9977
  if (completion.tool_calls && completion.tool_calls.length > 0) {
9978
+ const fnCalls = completion.tool_calls
9979
+ .filter((tc) => tc.type === 'function')
9980
+ .map((tc) => ({
9981
+ id: tc.id,
9982
+ name: tc.function.name,
9983
+ arguments: JSON.parse(tc.function.arguments),
9984
+ }));
9946
9985
  return {
9947
- response: {
9948
- tool_calls: completion.tool_calls.map((tc) => ({
9949
- id: tc.id,
9950
- name: tc.function.name,
9951
- arguments: JSON.parse(tc.function.arguments),
9952
- })),
9953
- },
9986
+ response: { tool_calls: fnCalls },
9954
9987
  usage: {
9955
9988
  prompt_tokens: completion.usage.prompt_tokens,
9956
9989
  completion_tokens: completion.usage.completion_tokens,
@@ -10005,6 +10038,122 @@ const makeDeepseekCall = async (content, responseFormat = 'json', options = {})
10005
10038
  }
10006
10039
  };
10007
10040
 
10041
+ // llm-openrouter.ts
10042
+ // Map our ContextMessage to OpenAI chat message
10043
+ function mapContextToMessages(context) {
10044
+ return context.map((msg) => {
10045
+ const role = msg.role === 'developer' ? 'system' : msg.role;
10046
+ return { role, content: msg.content };
10047
+ });
10048
+ }
10049
+ function toOpenRouterModel(model) {
10050
+ if (model && model.includes('/'))
10051
+ return model;
10052
+ const base = normalizeModelName(model || DEFAULT_MODEL);
10053
+ return `openai/${base}`;
10054
+ }
10055
+ // Normalize model name for pricing
10056
+ function normalizeModelForPricing(model) {
10057
+ if (!model)
10058
+ return { provider: 'openai', coreModel: normalizeModelName(DEFAULT_MODEL) };
10059
+ const [maybeProvider, maybeModel] = model.includes('/') ? model.split('/') : ['openai', model];
10060
+ const provider = (maybeProvider === 'deepseek' ? 'deepseek' : 'openai');
10061
+ const coreModel = normalizeModelName(maybeModel || model);
10062
+ return { provider, coreModel };
10063
+ }
10064
+ /**
10065
+ * Make a call through OpenRouter using the OpenAI Chat Completions-compatible API.
10066
+ * Supports: JSON mode, model selection, message history, and tools.
10067
+ */
10068
+ async function makeOpenRouterCall(input, options = {}) {
10069
+ const { apiKey = process.env.OPENROUTER_API_KEY, model, responseFormat = 'text', tools, toolChoice, context, developerPrompt, temperature = 0.2, max_tokens, top_p, frequency_penalty, presence_penalty, stop, seed, referer = process.env.OPENROUTER_SITE_URL, title = process.env.OPENROUTER_SITE_NAME, } = options;
10070
+ if (!apiKey) {
10071
+ throw new Error('OpenRouter API key is not provided and OPENROUTER_API_KEY is not set');
10072
+ }
10073
+ const client = new OpenAI({
10074
+ apiKey,
10075
+ baseURL: 'https://openrouter.ai/api/v1',
10076
+ defaultHeaders: {
10077
+ ...(referer ? { 'HTTP-Referer': referer } : {}),
10078
+ ...(title ? { 'X-Title': title } : {}),
10079
+ },
10080
+ });
10081
+ const messages = [];
10082
+ if (developerPrompt && developerPrompt.trim()) {
10083
+ messages.push({ role: 'system', content: developerPrompt });
10084
+ }
10085
+ if (context && context.length > 0) {
10086
+ messages.push(...mapContextToMessages(context));
10087
+ }
10088
+ messages.push({ role: 'user', content: input });
10089
+ // Configure response_format
10090
+ let response_format;
10091
+ let parsingFormat = 'text';
10092
+ if (responseFormat === 'json') {
10093
+ response_format = { type: 'json_object' };
10094
+ parsingFormat = 'json';
10095
+ }
10096
+ else if (typeof responseFormat === 'object') {
10097
+ response_format = { type: 'json_object' };
10098
+ parsingFormat = responseFormat;
10099
+ }
10100
+ const modelId = toOpenRouterModel(model);
10101
+ const completion = await client.chat.completions.create({
10102
+ model: modelId,
10103
+ messages,
10104
+ response_format,
10105
+ tools,
10106
+ tool_choice: toolChoice,
10107
+ temperature,
10108
+ max_tokens,
10109
+ top_p,
10110
+ frequency_penalty,
10111
+ presence_penalty,
10112
+ stop,
10113
+ seed,
10114
+ });
10115
+ const choice = completion.choices && completion.choices.length > 0 ? completion.choices[0] : undefined;
10116
+ const message = (choice && 'message' in choice ? choice.message : undefined);
10117
+ const { provider: pricingProvider, coreModel } = normalizeModelForPricing(modelId);
10118
+ const promptTokens = completion.usage?.prompt_tokens ?? 0;
10119
+ const completionTokens = completion.usage?.completion_tokens ?? 0;
10120
+ const cost = calculateCost(pricingProvider, coreModel, promptTokens, completionTokens);
10121
+ // Tool calls branch: return empty string response and expose tool_calls on LLMResponse
10122
+ const hasToolCalls = Array.isArray(message?.tool_calls) && message.tool_calls.length > 0;
10123
+ if (hasToolCalls) {
10124
+ const usageModel = isOpenRouterModel(modelId) ? modelId : DEFAULT_MODEL;
10125
+ return {
10126
+ response: '',
10127
+ usage: {
10128
+ prompt_tokens: promptTokens,
10129
+ completion_tokens: completionTokens,
10130
+ provider: 'openrouter',
10131
+ model: usageModel,
10132
+ cost,
10133
+ },
10134
+ tool_calls: message.tool_calls,
10135
+ };
10136
+ }
10137
+ const rawText = typeof message?.content === 'string' ? message.content : '';
10138
+ const parsed = await parseResponse(rawText, parsingFormat);
10139
+ if (parsed === null) {
10140
+ throw new Error('Failed to parse OpenRouter response');
10141
+ }
10142
+ // Ensure the model value conforms to LLMModel; otherwise fall back to DEFAULT_MODEL
10143
+ const usageModel = isOpenRouterModel(modelId) ? modelId : DEFAULT_MODEL;
10144
+ return {
10145
+ response: parsed,
10146
+ usage: {
10147
+ prompt_tokens: promptTokens,
10148
+ completion_tokens: completionTokens,
10149
+ provider: 'openrouter',
10150
+ model: usageModel,
10151
+ cost,
10152
+ },
10153
+ ...(hasToolCalls ? { tool_calls: message.tool_calls } : {}),
10154
+ };
10155
+ }
10156
+
10008
10157
  /**
10009
10158
  * A class to measure performance of code execution.
10010
10159
  *
@@ -16964,6 +17113,18 @@ Websocket example
16964
17113
  this.log(`Received trade update: event ${update.event} for an order to ${update.order.side} ${update.order.qty} of ${update.order.symbol}`);
16965
17114
  });
16966
17115
  alpacaAPI.connectWebsocket(); // necessary to connect to the WebSocket
17116
+
17117
+ Portfolio History examples
17118
+ // Get standard portfolio history
17119
+ const portfolioHistory = await alpacaAPI.getPortfolioHistory({
17120
+ timeframe: '1D',
17121
+ period: '1M'
17122
+ });
17123
+
17124
+ // Get daily portfolio history with current day included (if available from hourly data)
17125
+ const dailyHistory = await alpacaAPI.getPortfolioDailyHistory({
17126
+ period: '1M'
17127
+ });
16967
17128
  */
16968
17129
  class AlpacaTradingAPI {
16969
17130
  static new(credentials) {
@@ -17663,6 +17824,57 @@ class AlpacaTradingAPI {
17663
17824
  const response = await this.makeRequest(`/account/portfolio/history?${queryParams.toString()}`);
17664
17825
  return response;
17665
17826
  }
17827
+ /**
17828
+ * Get portfolio daily history for the account, ensuring the most recent day is included
17829
+ * by combining daily and hourly history if needed.
17830
+ *
17831
+ * This function performs two API calls:
17832
+ * 1. Retrieves daily portfolio history
17833
+ * 2. Retrieves hourly portfolio history to check for more recent data
17834
+ *
17835
+ * If hourly history has timestamps more recent than the last timestamp in daily history,
17836
+ * it appends one additional day to the daily history using the most recent hourly values.
17837
+ *
17838
+ * @param params Parameters for the portfolio history request (same as getPortfolioHistory except timeframe is forced to '1D')
17839
+ * @returns Portfolio history data with daily timeframe, including the most recent day if available from hourly data
17840
+ */
17841
+ async getPortfolioDailyHistory(params) {
17842
+ // Get daily history
17843
+ const dailyParams = { ...params, timeframe: '1D' };
17844
+ const dailyHistory = await this.getPortfolioHistory(dailyParams);
17845
+ // Get hourly history for the last day to check for more recent data
17846
+ const hourlyParams = { timeframe: '1H', period: '1D' };
17847
+ const hourlyHistory = await this.getPortfolioHistory(hourlyParams);
17848
+ // If no hourly history, return daily as-is
17849
+ if (!hourlyHistory.timestamp || hourlyHistory.timestamp.length === 0) {
17850
+ return dailyHistory;
17851
+ }
17852
+ // Get the last timestamp from daily history
17853
+ const lastDailyTimestamp = dailyHistory.timestamp[dailyHistory.timestamp.length - 1];
17854
+ // Check if hourly history has more recent data
17855
+ const recentHourlyData = hourlyHistory.timestamp
17856
+ .map((timestamp, index) => ({ timestamp, index }))
17857
+ .filter(({ timestamp }) => timestamp > lastDailyTimestamp);
17858
+ // If no more recent hourly data, return daily history as-is
17859
+ if (recentHourlyData.length === 0) {
17860
+ return dailyHistory;
17861
+ }
17862
+ // Get the most recent hourly data point
17863
+ const mostRecentHourly = recentHourlyData[recentHourlyData.length - 1];
17864
+ const mostRecentIndex = mostRecentHourly.index;
17865
+ // Calculate the timestamp for the new daily entry (most recent day + 1 day worth of seconds)
17866
+ const oneDayInSeconds = 24 * 60 * 60;
17867
+ const newDailyTimestamp = mostRecentHourly.timestamp + oneDayInSeconds;
17868
+ // Create a new daily history entry with the most recent hourly values
17869
+ const updatedDailyHistory = {
17870
+ ...dailyHistory,
17871
+ timestamp: [...dailyHistory.timestamp, newDailyTimestamp],
17872
+ equity: [...dailyHistory.equity, hourlyHistory.equity[mostRecentIndex]],
17873
+ profit_loss: [...dailyHistory.profit_loss, hourlyHistory.profit_loss[mostRecentIndex]],
17874
+ profit_loss_pct: [...dailyHistory.profit_loss_pct, hourlyHistory.profit_loss_pct[mostRecentIndex]],
17875
+ };
17876
+ return updatedDailyHistory;
17877
+ }
17666
17878
  /**
17667
17879
  * Get option contracts based on specified parameters
17668
17880
  * @param params Parameters to filter option contracts
@@ -18314,6 +18526,7 @@ const disco = {
18314
18526
  call: makeLLMCall,
18315
18527
  seek: makeDeepseekCall,
18316
18528
  images: makeImagesCall,
18529
+ open: makeOpenRouterCall,
18317
18530
  },
18318
18531
  polygon: {
18319
18532
  fetchTickerInfo: fetchTickerInfo,
@@ -18362,4 +18575,5 @@ const disco = {
18362
18575
  exports.AlpacaMarketDataAPI = AlpacaMarketDataAPI;
18363
18576
  exports.AlpacaTradingAPI = AlpacaTradingAPI;
18364
18577
  exports.disco = disco;
18578
+ exports.isOpenRouterModel = isOpenRouterModel;
18365
18579
  //# sourceMappingURL=index.cjs.map