@discomedia/utils 1.0.46 → 1.0.50

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -17,6 +17,15 @@ npm install @discomedia/utils
  > It is included as a dependency because Rollup and TypeScript emit helpers that require `tslib` at build time.
  > Do not remove `tslib` from dependencies, or CI builds will fail.
 
+ ## Updating
+
+ When this repo is updated, it auto-publishes to NPM via a workflow in the [gh_workflows repository](https://github.com/discomedia/gh_workflows).
+
+ For publishing to succeed, make sure this repo has the NPM_TOKEN environment variable set.
+
+ <img width="1157" height="786" alt="image" src="https://github.com/user-attachments/assets/e6aa7b5a-6584-436c-93ff-e8acafed9697" />
+
+
  ## Usage
 
  This package provides two different entry points depending on your environment:
@@ -119,4 +128,4 @@ Contributions are welcome! Please submit a pull request or open an issue for any
 
  ## Author
 
- This project is a product of [Disco Media](https://discomedia.co).
+ This project is a product of [Disco Media](https://discomedia.co).
@@ -9,6 +9,10 @@ function isOpenRouterModel(model) {
  'openai/gpt-5-mini',
  'openai/gpt-5-nano',
  'openai/gpt-5.1',
+ 'openai/gpt-5.2',
+ 'openai/gpt-5.2-pro',
+ 'openai/gpt-5.1-codex',
+ 'openai/gpt-5.1-codex-max',
  'openai/gpt-oss-120b',
  'z.ai/glm-4.5',
  'z.ai/glm-4.5-air',
@@ -256,7 +260,7 @@ const safeJSON = (text) => {
  // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
  const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
 
- const VERSION = '6.9.1'; // x-release-please-version
+ const VERSION = '6.10.0'; // x-release-please-version
 
  // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
  const isRunningInBrowser = () => {
@@ -6097,6 +6101,17 @@ class Responses extends APIResource {
  cancel(responseID, options) {
      return this._client.post(path `/responses/${responseID}/cancel`, options);
  }
+ /**
+  * Compact conversation
+  *
+  * @example
+  * ```ts
+  * const compactedResponse = await client.responses.compact();
+  * ```
+  */
+ compact(body = {}, options) {
+     return this._client.post('/responses/compact', { body, ...options });
+ }
  }
  Responses.InputItems = InputItems;
  Responses.InputTokens = InputTokens;
@@ -7239,6 +7254,22 @@ const openAiModelCosts = {
      inputCost: 1.25 / 1_000_000,
      outputCost: 10 / 1_000_000,
  },
+ 'gpt-5.2': {
+     inputCost: 1.5 / 1_000_000,
+     outputCost: 12 / 1_000_000,
+ },
+ 'gpt-5.2-pro': {
+     inputCost: 3 / 1_000_000,
+     outputCost: 24 / 1_000_000,
+ },
+ 'gpt-5.1-codex': {
+     inputCost: 1.1 / 1_000_000,
+     outputCost: 8.8 / 1_000_000,
+ },
+ 'gpt-5.1-codex-max': {
+     inputCost: 1.8 / 1_000_000,
+     outputCost: 14.4 / 1_000_000,
+ },
  'o4-mini': {
      inputCost: 1.1 / 1_000_000,
      outputCost: 4.4 / 1_000_000,
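The new entries are per-token dollar rates. The sketch below is a hypothetical illustration (the `estimateCost` helper is not part of the package) showing how the gpt-5.2 rates translate into a dollar cost for a call, assuming cost is computed as tokens multiplied by the per-token rate, as the table's units suggest.

```ts
// Hypothetical helper for illustration only; not exported by @discomedia/utils.
// Assumes cost = inputTokens * inputCost + outputTokens * outputCost.
const rates = {
  'gpt-5.2': { inputCost: 1.5 / 1_000_000, outputCost: 12 / 1_000_000 },
} as const;

function estimateCost(model: keyof typeof rates, inputTokens: number, outputTokens: number): number {
  const { inputCost, outputCost } = rates[model];
  return inputTokens * inputCost + outputTokens * outputCost;
}

// 10,000 input tokens and 2,000 output tokens on gpt-5.2:
// 10_000 * 0.0000015 + 2_000 * 0.000012 = 0.015 + 0.024 = $0.039
console.log(estimateCost('gpt-5.2', 10_000, 2_000)); // 0.039
```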
@@ -7700,6 +7731,10 @@ const isSupportedModel = (model) => {
  'gpt-5-mini',
  'gpt-5-nano',
  'gpt-5.1',
+ 'gpt-5.2',
+ 'gpt-5.2-pro',
+ 'gpt-5.1-codex',
+ 'gpt-5.1-codex-max',
  'o4-mini',
  'o3',
  ].includes(model);
@@ -7712,7 +7747,21 @@ const isSupportedModel = (model) => {
  function supportsTemperature(model) {
  // Reasoning models don't support temperature
  // GPT-5 models also do not support temperature
- const reasoningAndGPT5Models = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3', 'gpt-5', 'gpt-5-mini', 'gpt-5-nano', 'gpt-5.1'];
+ const reasoningAndGPT5Models = [
+     'o1',
+     'o1-mini',
+     'o3-mini',
+     'o4-mini',
+     'o3',
+     'gpt-5',
+     'gpt-5-mini',
+     'gpt-5-nano',
+     'gpt-5.1',
+     'gpt-5.2',
+     'gpt-5.2-pro',
+     'gpt-5.1-codex',
+     'gpt-5.1-codex-max',
+ ];
  return !reasoningAndGPT5Models.includes(model);
  }
  /**
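For callers, the practical effect of the expanded list is that `temperature` must be omitted for the new GPT-5.x models as well as the reasoning models. A minimal caller-side sketch, assuming `supportsTemperature` is exported from the package (its export is not visible in this hunk); the `buildParams` helper is hypothetical:

```ts
// Sketch only; supportsTemperature is assumed to be a package export.
import { supportsTemperature } from '@discomedia/utils';

function buildParams(model: string, temperature: number) {
  // gpt-5.2, gpt-5.1-codex, o3, etc. reject temperature, so drop it for those models.
  return {
    model,
    ...(supportsTemperature(model) ? { temperature } : {}),
  };
}

console.log(buildParams('gpt-4o', 0.7));  // includes temperature
console.log(buildParams('gpt-5.2', 0.7)); // omits temperature
```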
@@ -7730,7 +7779,16 @@ function isReasoningModel(model) {
  * @returns True if the model is a GPT-5 model, false otherwise.
  */
  function isGPT5Model(model) {
- const gpt5Models = ['gpt-5', 'gpt-5-mini', 'gpt-5-nano', 'gpt-5.1'];
+ const gpt5Models = [
+     'gpt-5',
+     'gpt-5-mini',
+     'gpt-5-nano',
+     'gpt-5.1',
+     'gpt-5.2',
+     'gpt-5.2-pro',
+     'gpt-5.1-codex',
+     'gpt-5.1-codex-max',
+ ];
  return gpt5Models.includes(model);
  }
  /**
@@ -7971,6 +8029,18 @@ async function makeLLMCall(input, options = {}) {
  return await makeResponsesAPICall(processedInput, responsesOptions);
  }
 
+ const MULTIMODAL_VISION_MODELS = new Set([
+     'gpt-4o-mini',
+     'gpt-4o',
+     'gpt-5',
+     'gpt-5-mini',
+     'gpt-5-nano',
+     'gpt-5.1',
+     'gpt-5.2',
+     'gpt-5.2-pro',
+     'gpt-5.1-codex',
+     'gpt-5.1-codex-max',
+ ]);
  /**
  * Makes a call to the OpenAI Images API to generate images based on a text prompt.
  *
@@ -8007,7 +8077,11 @@ async function makeLLMCall(input, options = {}) {
  * @throws Error if the API call fails or invalid parameters are provided
  */
  async function makeImagesCall(prompt, options = {}) {
- const { size = 'auto', outputFormat = 'webp', compression = 50, quality = 'high', count = 1, background = 'auto', moderation = 'auto', apiKey } = options;
+ const { size = 'auto', outputFormat = 'webp', compression = 50, quality = 'high', count = 1, background = 'auto', moderation = 'auto', apiKey, visionModel, } = options;
+ const supportedVisionModel = visionModel && MULTIMODAL_VISION_MODELS.has(visionModel) ? visionModel : undefined;
+ if (visionModel && !supportedVisionModel) {
+     console.warn(`Vision model ${visionModel} is not recognized as a multimodal OpenAI model. Ignoring for image usage metadata.`);
+ }
  // Get API key
  const effectiveApiKey = apiKey || process.env.OPENAI_API_KEY;
  if (!effectiveApiKey) {
@@ -8067,6 +8141,7 @@ async function makeImagesCall(prompt, options = {}) {
  provider: 'openai',
  model: 'gpt-image-1',
  cost,
+ ...(supportedVisionModel ? { visionModel: supportedVisionModel } : {}),
  },
  };
  return enhancedResponse;
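The new `visionModel` option only affects the usage metadata attached to the image response; unrecognized models are ignored with a warning. A rough usage sketch, assuming `makeImagesCall` is exported from the package and behaves as shown in the hunks above (the exact response shape is not confirmed by this diff):

```ts
// Sketch only; the export and response shape are assumptions based on this diff.
import { makeImagesCall } from '@discomedia/utils';

const result = await makeImagesCall('A watercolor fox in a forest', {
  outputFormat: 'webp',
  quality: 'high',
  visionModel: 'gpt-5.2',          // recognized: recorded in the usage metadata
  // visionModel: 'gpt-3.5-turbo', // unrecognized: logs a warning and is ignored
});

// Per the hunk above, the metadata carries provider, model ('gpt-image-1'), cost,
// and visionModel only when it passed the MULTIMODAL_VISION_MODELS check.
console.log(result);
```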
@@ -8767,9 +8842,10 @@ function getLastFullTradingDateImpl(currentDate = new Date()) {
  if (calendar.isEarlyCloseDay(prevMarketDay)) {
      prevCloseMinutes = MARKET_CONFIG.TIMES.EARLY_CLOSE.hour * 60 + MARKET_CONFIG.TIMES.EARLY_CLOSE.minute;
  }
- const year = prevMarketDay.getUTCFullYear();
- const month = prevMarketDay.getUTCMonth();
- const day = prevMarketDay.getUTCDate();
+ const prevNYDate = toNYTime(prevMarketDay);
+ const year = prevNYDate.getUTCFullYear();
+ const month = prevNYDate.getUTCMonth();
+ const day = prevNYDate.getUTCDate();
  const closeHour = Math.floor(prevCloseMinutes / 60);
  const closeMinute = prevCloseMinutes % 60;
  return fromNYTime(new Date(Date.UTC(year, month, day, closeHour, closeMinute, 0, 0)));
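The date-part change matters near midnight UTC: a `prevMarketDay` timestamp in the early UTC hours still falls on the previous calendar day in New York, so reading `getUTCFullYear`/`getUTCMonth`/`getUTCDate` directly could build the close time on the wrong day. A small illustration, assuming (as the surrounding code implies) that `toNYTime` returns a Date whose UTC fields carry New York wall-clock values; `toNYTimeSketch` is a stand-in, not the package's implementation:

```ts
// Illustration of the off-by-one-day risk fixed above.
function toNYTimeSketch(d: Date): Date {
  // New York is UTC-5 in winter (DST ignored for this sketch).
  return new Date(d.getTime() - 5 * 60 * 60 * 1000);
}

// 01:30 UTC on Jan 10 is still 20:30 on Jan 9 in New York.
const prevMarketDay = new Date(Date.UTC(2025, 0, 10, 1, 30));

console.log(prevMarketDay.getUTCDate());                 // 10 (wrong trading day)
console.log(toNYTimeSketch(prevMarketDay).getUTCDate()); // 9  (correct NY calendar day)
```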